# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import igraph as ig
import numpy as np
import pickle
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from itertools import zip_longest
from itertools import islice
from tqdm import tqdm
import os
from random import sample

# +
## Matplotlib defaults ##
colors = {"1980": "#aa0f35", "1985": "#fdc799", "1990": "#abdbfc", "1995": "#a0a75c",
          "2000": "#603734", "2005": "#009ecb", "2010": "#430057", "2015": "#f89b00"}

font = {'family': 'normal',
        'weight': 'bold',
        'size': 22}

plt.rcParams.update({'font.size': 22})
# -

# # Preferential Attachment Graphing
#
# Goal: graph preferential attachment for all 5-year increments from 1980 to 2019

# +
start = 1980
end = 2005

pairs = [(1980, 1984), (1985, 1989), (1990, 1994), (1995, 1999),
         (2000, 2004), (2005, 2009), (2010, 2014), (2015, 2019)]
# -

def plot_best_fit(start, stop):
    # Read in pickled files - from Cronin relative file path
    full_id_degrees = pickle.load(file=open("Data/Degrees/full_id_degrees_" + str(start) + "_" + str(stop) + ".p", "rb"))
    pref_attach_dict = pickle.load(file=open("Data/pref_attach_dict_" + str(start) + "_" + str(stop) + ".p", "rb"))

    # Read in pickled files - from cronin file path
    # full_id_degrees = pickle.load(file=open("Z:/group/<NAME>/Projects/Patents/Data/full_id_degrees_1980_1989.p", "rb"))
    # pref_attach_dict = pickle.load(file=open("Z:/group/<NAME>/Projects/Patents/Data/pref_attach_dict_1980_1989.p", "rb"))

    # Create a list of initial degrees
    initial_degrees = []
    for key, value in full_id_degrees.items():
        initial_degrees.append(value[0])

    plt.scatter(x=initial_degrees, y=list(pref_attach_dict.values()),
                color=colors[str(start)], alpha=0.2)

    # Fit line of best fit
    m, b = np.polyfit(initial_degrees, list(pref_attach_dict.values()), 1)
    plt.plot(np.arange(0, max(initial_degrees), 1),
             m * np.arange(0, max(initial_degrees), 1) + b,
             label=str(start) + " - " + str(stop), color=colors[str(start)])

# +
# Best fit lines only (& normalized)
plt.figure(figsize=(8, 8))

for pair in [[2015, 2019]]:
    plot_best_fit(pair[0], pair[1])

plt.xlabel("Initial Degree")
plt.ylabel("Preferential Attachment Index")
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.title("2015 To 2019")
# plt.xlim([0,1000])
# plt.ylim([0,1000])
plt.show()
# -

mb_pairs = [[0.9775505820281024, 3.6911788708586846],
            [0.8585131147329826, 4.217353519055628],
            [1.0897414910508383, 4.6551801537076605],
            [1.0291041384269501, 5.702995243832083],
            [1.5637915240218274, 18.08341830551379],
            [1.12369714513705, 15.94782609638979],
            [1.1358915457365812, 7.702558377590308],
            [1.1986916622559294, 6.89683552196379]]

# +
plt.figure(figsize=(10, 10))

count = 0
for pair in mb_pairs:
    start, stop = pairs[count]
    plt.plot(np.arange(0, 1001, 1), pair[0] * np.arange(0, 1001, 1) + pair[1],
             label=str(start) + " - " + str(stop) + " , slope=" + str(round(pair[0], 3)),
             color=colors[str(start)])
    count += 1

# Linear Fit
plt.plot(np.arange(0, 1001, 1), np.arange(0, 1001, 1), "--", label="Linear Fit", color="#aaaaaa")

plt.legend(prop={"size": 14})
plt.xlabel("Initial Degree - Normalized")
plt.ylabel("Avg Preferential Attachment Index, Normalized", fontsize=22)
# -

def find_best_fit_scatter(start, stop, colors):
    # Read in pickled files - from Cronin relative file path
    full_id_degrees = pickle.load(file=open("Data/Degrees/full_id_degrees_" + str(start) + "_" + str(stop) + ".p", "rb"))
    pref_attach_dict = pickle.load(file=open("Data/pref_attach_dict_" + str(start) + "_" + str(stop) + ".p", "rb"))

    # Read in pickled files - from cronin file path
    # full_id_degrees = pickle.load(file=open("Z:/group/<NAME>/Projects/Patents/Data/full_id_degrees_1980_1989.p", "rb"))
    # pref_attach_dict = pickle.load(file=open("Z:/group/<NAME>/Projects/Patents/Data/pref_attach_dict_1980_1989.p", "rb"))

    # Create a list of initial degrees
    initial_degrees = []
    for key, value in full_id_degrees.items():
        initial_degrees.append(value[0])

    # Fit line of best fit
    m, b = np.polyfit(initial_degrees, list(pref_attach_dict.values()), 1)
    print(m, b)

    initial_degrees = np.array(initial_degrees)
    plt.plot(np.arange(0, 400000, 1), m * np.arange(0, 400000, 1) + b,
             label=str(start) + " - " + str(stop), color=colors[str(start)])
    plt.scatter(x=initial_degrees, y=list(pref_attach_dict.values()), alpha=0.2, color=colors[str(start)])

# +
# Best fit lines only (& normalized)
plt.figure(figsize=(12, 12))

for pair in pairs:
    find_best_fit_scatter(pair[0], pair[1], colors)

plt.xlabel("Initial Degree")
plt.ylabel("Preferential Attachment Index")
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.title("1980 - 2019, 5 Year Increments")
plt.legend()
# plt.xlim([0,1000])
# plt.ylim([0,1000])
plt.show()
pref_attachment_analysis.ipynb
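The notebook above repeats the same load-and-fit step inside both `plot_best_fit` and `find_best_fit_scatter`. A minimal sketch of that shared step is shown below; it assumes the same pickle layout the notebook uses (values of `full_id_degrees` are per-node degree histories whose first entry is the initial degree, and `pref_attach_dict` holds the matching attachment indices), and the helper name `fit_attachment_slope` is introduced here for illustration only.

```python
import pickle
import numpy as np

def fit_attachment_slope(start, stop, data_dir="Data"):
    """Fit attachment index vs. initial degree for one 5-year window.

    Mirrors the load-and-polyfit logic used in the notebook's two
    plotting helpers; file layout and dict structure are assumptions
    taken from that notebook.
    """
    with open(f"{data_dir}/Degrees/full_id_degrees_{start}_{stop}.p", "rb") as f:
        full_id_degrees = pickle.load(f)
    with open(f"{data_dir}/pref_attach_dict_{start}_{stop}.p", "rb") as f:
        pref_attach_dict = pickle.load(f)

    initial_degrees = np.array([v[0] for v in full_id_degrees.values()])
    attach_index = np.array(list(pref_attach_dict.values()))

    # Least-squares line of best fit, as in the notebook's np.polyfit calls
    m, b = np.polyfit(initial_degrees, attach_index, 1)
    return m, b, initial_degrees, attach_index
```

Each plotting cell could then call this helper once per window instead of re-reading the pickles inline, which is where the hard-coded slopes and intercepts in `mb_pairs` presumably came from.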
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Neural Machine Translation # # Welcome to your first programming assignment for this week! # # * You will build a Neural Machine Translation (NMT) model to translate human-readable dates ("25th of June, 2009") into machine-readable dates ("2009-06-25"). # * You will do this using an attention model, one of the most sophisticated sequence-to-sequence models. # # This notebook was produced together with NVIDIA's Deep Learning Institute. # ## <font color='darkblue'>Updates</font> # # #### If you were working on the notebook before this update... # * The current notebook is version "4a". # * You can find your original work saved in the notebook with the previous version name ("v4") # * To view the file directory, go to the menu "File->Open", and this will open a new tab that shows the file directory. # # #### List of updates # * Clarified names of variables to be consistent with the lectures and consistent within the assignment # - pre-attention bi-directional LSTM: the first LSTM that processes the input data. # - 'a': the hidden state of the pre-attention LSTM. # - post-attention LSTM: the LSTM that outputs the translation. # - 's': the hidden state of the post-attention LSTM. # - energies "e". The output of the dense function that takes "a" and "s" as inputs. # - All references to "output activation" are updated to "hidden state". # - "post-activation" sequence model is updated to "post-attention sequence model". # - 3.1: "Getting the activations from the Network" renamed to "Getting the attention weights from the network." # - Appropriate mentions of "activation" replaced "attention weights." # - Sequence of alphas corrected to be a sequence of "a" hidden states. # * one_step_attention: # - Provides sample code for each Keras layer, to show how to call the functions. # - Reminds students to provide the list of hidden states in a specific order, in order to pause the autograder. # * model # - Provides sample code for each Keras layer, to show how to call the functions. # - Added a troubleshooting note about handling errors. # - Fixed typo: outputs should be of length 10 and not 11. # * define optimizer and compile model # - Provides sample code for each Keras layer, to show how to call the functions. # # * Spelling, grammar and wording corrections. # Let's load all the packages you will need for this assignment. # + from keras.layers import Bidirectional, Concatenate, Permute, Dot, Input, LSTM, Multiply from keras.layers import RepeatVector, Dense, Activation, Lambda from keras.optimizers import Adam from keras.utils import to_categorical from keras.models import load_model, Model import keras.backend as K import numpy as np from faker import Faker import random from tqdm import tqdm from babel.dates import format_date from nmt_utils import * import matplotlib.pyplot as plt # %matplotlib inline # - # ## 1 - Translating human readable dates into machine readable dates # # * The model you will build here could be used to translate from one language to another, such as translating from English to Hindi. # * However, language translation requires massive datasets and usually takes days of training on GPUs. # * To give you a place to experiment with these models without using massive datasets, we will perform a simpler "date translation" task. 
# * The network will input a date written in a variety of possible formats (*e.g. "the 29th of August 1958", "03/30/1968", "24 JUNE 1987"*) # * The network will translate them into standardized, machine readable dates (*e.g. "1958-08-29", "1968-03-30", "1987-06-24"*). # * We will have the network learn to output dates in the common machine-readable format YYYY-MM-DD. # # <!-- # Take a look at [nmt_utils.py](./nmt_utils.py) to see all the formatting. Count and figure out how the formats work, you will need this knowledge later. !--> # ### 1.1 - Dataset # # We will train the model on a dataset of 10,000 human readable dates and their equivalent, standardized, machine readable dates. Let's run the following cells to load the dataset and print some examples. m = 10000 dataset, human_vocab, machine_vocab, inv_machine_vocab = load_dataset(m) dataset[:10] # You've loaded: # - `dataset`: a list of tuples of (human readable date, machine readable date). # - `human_vocab`: a python dictionary mapping all characters used in the human readable dates to an integer-valued index. # - `machine_vocab`: a python dictionary mapping all characters used in machine readable dates to an integer-valued index. # - **Note**: These indices are not necessarily consistent with `human_vocab`. # - `inv_machine_vocab`: the inverse dictionary of `machine_vocab`, mapping from indices back to characters. # # Let's preprocess the data and map the raw text data into the index values. # - We will set Tx=30 # - We assume Tx is the maximum length of the human readable date. # - If we get a longer input, we would have to truncate it. # - We will set Ty=10 # - "YYYY-MM-DD" is 10 characters long. # + Tx = 30 Ty = 10 X, Y, Xoh, Yoh = preprocess_data(dataset, human_vocab, machine_vocab, Tx, Ty) print("X.shape:", X.shape) print("Y.shape:", Y.shape) print("Xoh.shape:", Xoh.shape) print("Yoh.shape:", Yoh.shape) # - # You now have: # - `X`: a processed version of the human readable dates in the training set. # - Each character in X is replaced by an index (integer) mapped to the character using `human_vocab`. # - Each date is padded to ensure a length of $T_x$ using a special character (< pad >). # - `X.shape = (m, Tx)` where m is the number of training examples in a batch. # - `Y`: a processed version of the machine readable dates in the training set. # - Each character is replaced by the index (integer) it is mapped to in `machine_vocab`. # - `Y.shape = (m, Ty)`. # - `Xoh`: one-hot version of `X` # - Each index in `X` is converted to the one-hot representation (if the index is 2, the one-hot version has the index position 2 set to 1, and the remaining positions are 0. # - `Xoh.shape = (m, Tx, len(human_vocab))` # - `Yoh`: one-hot version of `Y` # - Each index in `Y` is converted to the one-hot representation. # - `Yoh.shape = (m, Tx, len(machine_vocab))`. # - `len(machine_vocab) = 11` since there are 10 numeric digits (0 to 9) and the `-` symbol. # * Let's also look at some examples of preprocessed training examples. # * Feel free to play with `index` in the cell below to navigate the dataset and see how source/target dates are preprocessed. 
index = 0 print("Source date:", dataset[index][0]) print("Target date:", dataset[index][1]) print() print("Source after preprocessing (indices):", X[index]) print("Target after preprocessing (indices):", Y[index]) print() print("Source after preprocessing (one-hot):", Xoh[index]) print("Target after preprocessing (one-hot):", Yoh[index]) # ## 2 - Neural machine translation with attention # # * If you had to translate a book's paragraph from French to English, you would not read the whole paragraph, then close the book and translate. # * Even during the translation process, you would read/re-read and focus on the parts of the French paragraph corresponding to the parts of the English you are writing down. # * The attention mechanism tells a Neural Machine Translation model where it should pay attention to at any step. # # # ### 2.1 - Attention mechanism # # In this part, you will implement the attention mechanism presented in the lecture videos. # * Here is a figure to remind you how the model works. # * The diagram on the left shows the attention model. # * The diagram on the right shows what one "attention" step does to calculate the attention variables $\alpha^{\langle t, t' \rangle}$. # * The attention variables $\alpha^{\langle t, t' \rangle}$ are used to compute the context variable $context^{\langle t \rangle}$ for each timestep in the output ($t=1, \ldots, T_y$). # # <table> # <td> # <img src="images/attn_model.png" style="width:500;height:500px;"> <br> # </td> # <td> # <img src="images/attn_mechanism.png" style="width:500;height:500px;"> <br> # </td> # </table> # <caption><center> **Figure 1**: Neural machine translation with attention</center></caption> # # Here are some properties of the model that you may notice: # # #### Pre-attention and Post-attention LSTMs on both sides of the attention mechanism # - There are two separate LSTMs in this model (see diagram on the left): pre-attention and post-attention LSTMs. # - *Pre-attention* Bi-LSTM is the one at the bottom of the picture is a Bi-directional LSTM and comes *before* the attention mechanism. # - The attention mechanism is shown in the middle of the left-hand diagram. # - The pre-attention Bi-LSTM goes through $T_x$ time steps # - *Post-attention* LSTM: at the top of the diagram comes *after* the attention mechanism. # - The post-attention LSTM goes through $T_y$ time steps. # # - The post-attention LSTM passes the hidden state $s^{\langle t \rangle}$ and cell state $c^{\langle t \rangle}$ from one time step to the next. # #### An LSTM has both a hidden state and cell state # * In the lecture videos, we were using only a basic RNN for the post-attention sequence model # * This means that the state captured by the RNN was outputting only the hidden state $s^{\langle t\rangle}$. # * In this assignment, we are using an LSTM instead of a basic RNN. # * So the LSTM has both the hidden state $s^{\langle t\rangle}$ and the cell state $c^{\langle t\rangle}$. # #### Each time step does not use predictions from the previous time step # * Unlike previous text generation examples earlier in the course, in this model, the post-attention LSTM at time $t$ does not take the previous time step's prediction $y^{\langle t-1 \rangle}$ as input. # * The post-attention LSTM at time 't' only takes the hidden state $s^{\langle t\rangle}$ and cell state $c^{\langle t\rangle}$ as input. 
# * We have designed the model this way because unlike language generation (where adjacent characters are highly correlated) there isn't as strong a dependency between the previous character and the next character in a YYYY-MM-DD date. # #### Concatenation of hidden states from the forward and backward pre-attention LSTMs # - $\overrightarrow{a}^{\langle t \rangle}$: hidden state of the forward-direction, pre-attention LSTM. # - $\overleftarrow{a}^{\langle t \rangle}$: hidden state of the backward-direction, pre-attention LSTM. # - $a^{\langle t \rangle} = [\overrightarrow{a}^{\langle t \rangle}, \overleftarrow{a}^{\langle t \rangle}]$: the concatenation of the activations of both the forward-direction $\overrightarrow{a}^{\langle t \rangle}$ and backward-directions $\overleftarrow{a}^{\langle t \rangle}$ of the pre-attention Bi-LSTM. # #### Computing "energies" $e^{\langle t, t' \rangle}$ as a function of $s^{\langle t-1 \rangle}$ and $a^{\langle t' \rangle}$ # - Recall in the lesson videos "Attention Model", at time 6:45 to 8:16, the definition of "e" as a function of $s^{\langle t-1 \rangle}$ and $a^{\langle t \rangle}$. # - "e" is called the "energies" variable. # - $s^{\langle t-1 \rangle}$ is the hidden state of the post-attention LSTM # - $a^{\langle t' \rangle}$ is the hidden state of the pre-attention LSTM. # - $s^{\langle t-1 \rangle}$ and $a^{\langle t \rangle}$ are fed into a simple neural network, which learns the function to output $e^{\langle t, t' \rangle}$. # - $e^{\langle t, t' \rangle}$ is then used when computing the attention $a^{\langle t, t' \rangle}$ that $y^{\langle t \rangle}$ should pay to $a^{\langle t' \rangle}$. # - The diagram on the right of figure 1 uses a `RepeatVector` node to copy $s^{\langle t-1 \rangle}$'s value $T_x$ times. # - Then it uses `Concatenation` to concatenate $s^{\langle t-1 \rangle}$ and $a^{\langle t \rangle}$. # - The concatenation of $s^{\langle t-1 \rangle}$ and $a^{\langle t \rangle}$ is fed into a "Dense" layer, which computes $e^{\langle t, t' \rangle}$. # - $e^{\langle t, t' \rangle}$ is then passed through a softmax to compute $\alpha^{\langle t, t' \rangle}$. # - Note that the diagram doesn't explicitly show variable $e^{\langle t, t' \rangle}$, but $e^{\langle t, t' \rangle}$ is above the Dense layer and below the Softmax layer in the diagram in the right half of figure 1. # - We'll explain how to use `RepeatVector` and `Concatenation` in Keras below. # ### Implementation Details # # Let's implement this neural translator. You will start by implementing two functions: `one_step_attention()` and `model()`. # # #### one_step_attention # * The inputs to the one_step_attention at time step $t$ are: # - $[a^{<1>},a^{<2>}, ..., a^{<T_x>}]$: all hidden states of the pre-attention Bi-LSTM. # - $s^{<t-1>}$: the previous hidden state of the post-attention LSTM # * one_step_attention computes: # - $[\alpha^{<t,1>},\alpha^{<t,2>}, ..., \alpha^{<t,T_x>}]$: the attention weights # - $context^{ \langle t \rangle }$: the context vector: # # $$context^{<t>} = \sum_{t' = 1}^{T_x} \alpha^{<t,t'>}a^{<t'>}\tag{1}$$ # # ##### Clarifying 'context' and 'c' # - In the lecture videos, the context was denoted $c^{\langle t \rangle}$ # - In the assignment, we are calling the context $context^{\langle t \rangle}$. # - This is to avoid confusion with the post-attention LSTM's internal memory cell variable, which is also denoted $c^{\langle t \rangle}$. # #### Implement `one_step_attention` # # **Exercise**: Implement `one_step_attention()`. 
# # * The function `model()` will call the layers in `one_step_attention()` $T_y$ using a for-loop. # * It is important that all $T_y$ copies have the same weights. # * It should not reinitialize the weights every time. # * In other words, all $T_y$ steps should have shared weights. # * Here's how you can implement layers with shareable weights in Keras: # 1. Define the layer objects in a variable scope that is outside of the `one_step_attention` function. For example, defining the objects as global variables would work. # - Note that defining these variables inside the scope of the function `model` would technically work, since `model` will then call the `one_step_attention` function. For the purposes of making grading and troubleshooting easier, we are defining these as global variables. Note that the automatic grader will expect these to be global variables as well. # 2. Call these objects when propagating the input. # * We have defined the layers you need as global variables. # * Please run the following cells to create them. # * Please note that the automatic grader expects these global variables with the given variable names. For grading purposes, please do not rename the global variables. # * Please check the Keras documentation to learn more about these layers. The layers are functions. Below are examples of how to call these functions. # * [RepeatVector()](https://keras.io/layers/core/#repeatvector) # ```Python # var_repeated = repeat_layer(var1) # ``` # * [Concatenate()](https://keras.io/layers/merge/#concatenate) # ```Python # concatenated_vars = concatenate_layer([var1,var2,var3]) # ``` # * [Dense()](https://keras.io/layers/core/#dense) # ```Python # var_out = dense_layer(var_in) # ``` # * [Activation()](https://keras.io/layers/core/#activation) # ```Python # activation = activation_layer(var_in) # ``` # * [Dot()](https://keras.io/layers/merge/#dot) # ```Python # dot_product = dot_layer([var1,var2]) # ``` # Defined shared layers as global variables repeator = RepeatVector(Tx) concatenator = Concatenate(axis=-1) densor1 = Dense(10, activation = "tanh") densor2 = Dense(1, activation = "relu") activator = Activation(softmax, name='attention_weights') # We are using a custom softmax(axis = 1) loaded in this notebook dotor = Dot(axes = 1) # + # GRADED FUNCTION: one_step_attention def one_step_attention(a, s_prev): """ Performs one step of attention: Outputs a context vector computed as a dot product of the attention weights "alphas" and the hidden states "a" of the Bi-LSTM. Arguments: a -- hidden state output of the Bi-LSTM, numpy-array of shape (m, Tx, 2*n_a) s_prev -- previous hidden state of the (post-attention) LSTM, numpy-array of shape (m, n_s) Returns: context -- context vector, input of the next (post-attention) LSTM cell """ ### START CODE HERE ### # Use repeator to repeat s_prev to be of shape (m, Tx, n_s) so that you can concatenate it with all hidden states "a" (≈ 1 line) s_prev = repeator(s_prev) # Use concatenator to concatenate a and s_prev on the last axis (≈ 1 line) # For grading purposes, please list 'a' first and 's_prev' second, in this order. concat = concatenator([a, s_prev]) # Use densor1 to propagate concat through a small fully-connected neural network to compute the "intermediate energies" variable e. (≈1 lines) e = densor1(concat) # Use densor2 to propagate e through a small fully-connected neural network to compute the "energies" variable energies. 
(≈1 lines) energies = densor2(e) # Use "activator" on "energies" to compute the attention weights "alphas" (≈ 1 line) alphas = activator(energies) # Use dotor together with "alphas" and "a" to compute the context vector to be given to the next (post-attention) LSTM-cell (≈ 1 line) context = dotor([alphas, a]) ### END CODE HERE ### return context # - # You will be able to check the expected output of `one_step_attention()` after you've coded the `model()` function. # #### model # * `model` first runs the input through a Bi-LSTM to get $[a^{<1>},a^{<2>}, ..., a^{<T_x>}]$. # * Then, `model` calls `one_step_attention()` $T_y$ times using a `for` loop. At each iteration of this loop: # - It gives the computed context vector $context^{<t>}$ to the post-attention LSTM. # - It runs the output of the post-attention LSTM through a dense layer with softmax activation. # - The softmax generates a prediction $\hat{y}^{<t>}$. # **Exercise**: Implement `model()` as explained in figure 1 and the text above. Again, we have defined global layers that will share weights to be used in `model()`. # + n_a = 32 # number of units for the pre-attention, bi-directional LSTM's hidden state 'a' n_s = 64 # number of units for the post-attention LSTM's hidden state "s" # Please note, this is the post attention LSTM cell. # For the purposes of passing the automatic grader # please do not modify this global variable. This will be corrected once the automatic grader is also updated. post_activation_LSTM_cell = LSTM(n_s, return_state = True) # post-attention LSTM output_layer = Dense(len(machine_vocab), activation=softmax) # - # Now you can use these layers $T_y$ times in a `for` loop to generate the outputs, and their parameters will not be reinitialized. You will have to carry out the following steps: # # 1. Propagate the input `X` into a bi-directional LSTM. # * [Bidirectional](https://keras.io/layers/wrappers/#bidirectional) # * [LSTM](https://keras.io/layers/recurrent/#lstm) # * Remember that we want the LSTM to return a full sequence instead of just the last hidden state. # # Sample code: # # ```Python # sequence_of_hidden_states = Bidirectional(LSTM(units=..., return_sequences=...))(the_input_X) # ``` # # 2. Iterate for $t = 0, \cdots, T_y-1$: # 1. Call `one_step_attention()`, passing in the sequence of hidden states $[a^{\langle 1 \rangle},a^{\langle 2 \rangle}, ..., a^{ \langle T_x \rangle}]$ from the pre-attention bi-directional LSTM, and the previous hidden state $s^{<t-1>}$ from the post-attention LSTM to calculate the context vector $context^{<t>}$. # 2. Give $context^{<t>}$ to the post-attention LSTM cell. # - Remember to pass in the previous hidden-state $s^{\langle t-1\rangle}$ and cell-states $c^{\langle t-1\rangle}$ of this LSTM # * This outputs the new hidden state $s^{<t>}$ and the new cell state $c^{<t>}$. # # Sample code: # ```Python # next_hidden_state, _ , next_cell_state = # post_activation_LSTM_cell(inputs=..., initial_state=[prev_hidden_state, prev_cell_state]) # ``` # Please note that the layer is actually the "post attention LSTM cell". For the purposes of passing the automatic grader, please do not modify the naming of this global variable. This will be fixed when we deploy updates to the automatic grader. # 3. Apply a dense, softmax layer to $s^{<t>}$, get the output. # Sample code: # ```Python # output = output_layer(inputs=...) # ``` # 4. Save the output by adding it to the list of outputs. # # 3. Create your Keras model instance. 
# * It should have three inputs: # * `X`, the one-hot encoded inputs to the model, of shape ($T_{x}, humanVocabSize)$ # * $s^{\langle 0 \rangle}$, the initial hidden state of the post-attention LSTM # * $c^{\langle 0 \rangle}$), the initial cell state of the post-attention LSTM # * The output is the list of outputs. # Sample code # ```Python # model = Model(inputs=[...,...,...], outputs=...) # ``` # + # GRADED FUNCTION: model def model(Tx, Ty, n_a, n_s, human_vocab_size, machine_vocab_size): """ Arguments: Tx -- length of the input sequence Ty -- length of the output sequence n_a -- hidden state size of the Bi-LSTM n_s -- hidden state size of the post-attention LSTM human_vocab_size -- size of the python dictionary "human_vocab" machine_vocab_size -- size of the python dictionary "machine_vocab" Returns: model -- Keras model instance """ # Define the inputs of your model with a shape (Tx,) # Define s0 (initial hidden state) and c0 (initial cell state) # for the decoder LSTM with shape (n_s,) X = Input(shape=(Tx, human_vocab_size)) s0 = Input(shape=(n_s,), name='s0') c0 = Input(shape=(n_s,), name='c0') s = s0 c = c0 # Initialize empty list of outputs outputs = [] ### START CODE HERE ### # Step 1: Define your pre-attention Bi-LSTM. (≈ 1 line) a = Bidirectional(LSTM(units= n_a, return_sequences= True), input_shape = (m, Tx, 2*n_a))(X) # Step 2: Iterate for Ty steps for t in range(Ty): # Step 2.A: Perform one step of the attention mechanism to get back the context vector at step t (≈ 1 line) context = one_step_attention(a, s) # Step 2.B: Apply the post-attention LSTM cell to the "context" vector. # Don't forget to pass: initial_state = [hidden state, cell state] (≈ 1 line) s, _, c = post_activation_LSTM_cell(context, initial_state = [s, c]) # Step 2.C: Apply Dense layer to the hidden state output of the post-attention LSTM (≈ 1 line) out = output_layer(s) # Step 2.D: Append "out" to the "outputs" list (≈ 1 line) outputs.append(out) # Step 3: Create model instance taking three inputs and returning the list of outputs. (≈ 1 line) model = Model(inputs = [X, s0, c0], output = outputs) ### END CODE HERE ### return model # - # Run the following cell to create your model. model = model(Tx, Ty, n_a, n_s, len(human_vocab), len(machine_vocab)) # #### Troubleshooting Note # * If you are getting repeated errors after an initially incorrect implementation of "model", but believe that you have corrected the error, you may still see error messages when building your model. # * A solution is to save and restart your kernel (or shutdown then restart your notebook), and re-run the cells. # Let's get a summary of the model to check if it matches the expected output. 
model.summary() # **Expected Output**: # # Here is the summary you should see # <table> # <tr> # <td> # **Total params:** # </td> # <td> # 52,960 # </td> # </tr> # <tr> # <td> # **Trainable params:** # </td> # <td> # 52,960 # </td> # </tr> # <tr> # <td> # **Non-trainable params:** # </td> # <td> # 0 # </td> # </tr> # <tr> # <td> # **bidirectional_1's output shape ** # </td> # <td> # (None, 30, 64) # </td> # </tr> # <tr> # <td> # **repeat_vector_1's output shape ** # </td> # <td> # (None, 30, 64) # </td> # </tr> # <tr> # <td> # **concatenate_1's output shape ** # </td> # <td> # (None, 30, 128) # </td> # </tr> # <tr> # <td> # **attention_weights's output shape ** # </td> # <td> # (None, 30, 1) # </td> # </tr> # <tr> # <td> # **dot_1's output shape ** # </td> # <td> # (None, 1, 64) # </td> # </tr> # <tr> # <td> # **dense_3's output shape ** # </td> # <td> # (None, 11) # </td> # </tr> # </table> # # #### Compile the model # * After creating your model in Keras, you need to compile it and define the loss function, optimizer and metrics you want to use. # * Loss function: 'categorical_crossentropy'. # * Optimizer: [Adam](https://keras.io/optimizers/#adam) [optimizer](https://keras.io/optimizers/#usage-of-optimizers) # - learning rate = 0.005 # - $\beta_1 = 0.9$ # - $\beta_2 = 0.999$ # - decay = 0.01 # * metric: 'accuracy' # # Sample code # ```Python # optimizer = Adam(lr=..., beta_1=..., beta_2=..., decay=...) # model.compile(optimizer=..., loss=..., metrics=[...]) # ``` ### START CODE HERE ### (≈2 lines) opt = Adam(lr=0.005 , beta_1=0.9, beta_2=0.999, decay=0.01) model.compile(optimizer= opt, loss='categorical_crossentropy', metrics=['accuracy']) ### END CODE HERE ### # #### Define inputs and outputs, and fit the model # The last step is to define all your inputs and outputs to fit the model: # - You have input X of shape $(m = 10000, T_x = 30)$ containing the training examples. # - You need to create `s0` and `c0` to initialize your `post_attention_LSTM_cell` with zeros. # - Given the `model()` you coded, you need the "outputs" to be a list of 10 elements of shape (m, T_y). # - The list `outputs[i][0], ..., outputs[i][Ty]` represents the true labels (characters) corresponding to the $i^{th}$ training example (`X[i]`). # - `outputs[i][j]` is the true label of the $j^{th}$ character in the $i^{th}$ training example. s0 = np.zeros((m, n_s)) c0 = np.zeros((m, n_s)) outputs = list(Yoh.swapaxes(0,1)) # Let's now fit the model and run it for one epoch. model.fit([Xoh, s0, c0], outputs, epochs=1, batch_size=100) # While training you can see the loss as well as the accuracy on each of the 10 positions of the output. The table below gives you an example of what the accuracies could be if the batch had 2 examples: # # <img src="images/table.png" style="width:700;height:200px;"> <br> # <caption><center>Thus, `dense_2_acc_8: 0.89` means that you are predicting the 7th character of the output correctly 89% of the time in the current batch of data. </center></caption> # # # We have run this model for longer, and saved the weights. Run the next cell to load our weights. (By training a model for several minutes, you should be able to obtain a model of similar accuracy, but loading our model will save you time.) model.load_weights('models/model.h5') # You can now see the results on new examples. 
EXAMPLES = ['3 May 1979', '5 April 09', '21th of August 2016', 'Tue 10 Jul 2007', 'Saturday May 9 2018', 'March 3 2001', 'March 3rd 2001', '1 March 2001'] for example in EXAMPLES: source = string_to_int(example, Tx, human_vocab) source = np.array(list(map(lambda x: to_categorical(x, num_classes=len(human_vocab)), source))).swapaxes(0,1) prediction = model.predict([source, s0, c0]) prediction = np.argmax(prediction, axis = -1) output = [inv_machine_vocab[int(i)] for i in prediction] print("source:", example) print("output:", ''.join(output),"\n") # You can also change these examples to test with your own examples. The next part will give you a better sense of what the attention mechanism is doing--i.e., what part of the input the network is paying attention to when generating a particular output character. # ## 3 - Visualizing Attention (Optional / Ungraded) # # Since the problem has a fixed output length of 10, it is also possible to carry out this task using 10 different softmax units to generate the 10 characters of the output. But one advantage of the attention model is that each part of the output (such as the month) knows it needs to depend only on a small part of the input (the characters in the input giving the month). We can visualize what each part of the output is looking at which part of the input. # # Consider the task of translating "Saturday 9 May 2018" to "2018-05-09". If we visualize the computed $\alpha^{\langle t, t' \rangle}$ we get this: # # <img src="images/date_attention.png" style="width:600;height:300px;"> <br> # <caption><center> **Figure 8**: Full Attention Map</center></caption> # # Notice how the output ignores the "Saturday" portion of the input. None of the output timesteps are paying much attention to that portion of the input. We also see that 9 has been translated as 09 and May has been correctly translated into 05, with the output paying attention to the parts of the input it needs to to make the translation. The year mostly requires it to pay attention to the input's "18" in order to generate "2018." # ### 3.1 - Getting the attention weights from the network # # Lets now visualize the attention values in your network. We'll propagate an example through the network, then visualize the values of $\alpha^{\langle t, t' \rangle}$. # # To figure out where the attention values are located, let's start by printing a summary of the model . model.summary() # Navigate through the output of `model.summary()` above. You can see that the layer named `attention_weights` outputs the `alphas` of shape (m, 30, 1) before `dot_2` computes the context vector for every time step $t = 0, \ldots, T_y-1$. Let's get the attention weights from this layer. # # The function `attention_map()` pulls out the attention values from your model and plots them. attention_map = plot_attention_map(model, human_vocab, inv_machine_vocab, "Tuesday 09 Oct 1993", num = 7, n_s = 64); # On the generated plot you can observe the values of the attention weights for each character of the predicted output. Examine this plot and check that the places where the network is paying attention makes sense to you. # # In the date translation application, you will observe that most of the time attention helps predict the year, and doesn't have much impact on predicting the day or month. # ### Congratulations! # # # You have come to the end of this assignment # # ## Here's what you should remember # # - Machine translation models can be used to map from one sequence to another. 
They are useful not just for translating human languages (like French->English) but also for tasks like date format translation. # - An attention mechanism allows a network to focus on the most relevant parts of the input when producing a specific part of the output. # - A network using an attention mechanism can translate from inputs of length $T_x$ to outputs of length $T_y$, where $T_x$ and $T_y$ can be different. # - You can visualize attention weights $\alpha^{\langle t,t' \rangle}$ to see what the network is paying attention to while generating each output. # Congratulations on finishing this assignment! You are now able to implement an attention model and use it to learn complex mappings from one sequence to another.
Neural_machine_translation_with_attention_v4a.ipynb
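The attention layer in the notebook above is built with `Activation(softmax, name='attention_weights')`, where `softmax` is a custom helper loaded from `nmt_utils` that normalizes over axis 1 (the $T_x$ time steps) rather than the default last axis. The sketch below illustrates what such a function typically looks like; it is an illustration of the idea written against the `keras.backend` module the notebook already imports, not necessarily the exact helper shipped with the assignment.

```python
import keras.backend as K

def softmax(x, axis=1):
    """Softmax over a chosen axis (axis=1 = the Tx input positions here).

    Subtracting the max before exponentiating is a standard numerical
    stability trick and does not change the result.
    """
    e = K.exp(x - K.max(x, axis=axis, keepdims=True))
    return e / K.sum(e, axis=axis, keepdims=True)
```

Normalizing over axis 1 makes the attention weights $\alpha^{\langle t, t' \rangle}$ sum to 1 across the $T_x$ input positions for each output step, which is what equation (1) in the notebook requires.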
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # Fill in any place that says `# YOUR CODE HERE` or YOUR ANSWER HERE, as well as your name and collaborators below. # Grading for pre-lecture assignments is all or nothing. Partial credit is available for in-class assignments and checkpoints, but **only when code is commented**. # - NAME = "" COLLABORATORS = "" # --- # + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "13d6b1a164aaef5755a45afb6c9e9600", "grade": false, "grade_id": "cell-d93ae74c78cd7d44", "locked": true, "schema_version": 3, "solution": false, "task": false} import grading_helper as _test # + deletable=false nbgrader={"cell_type": "code", "checksum": "560d9bdcb695c46609552b6dcc0a5d72", "grade": false, "grade_id": "cell-acd067a3edcf1696", "locked": false, "schema_version": 3, "solution": true, "task": false} # Space for imports, utility functions, etc. # YOUR CODE HERE # + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "293ae8f028eab6202aa60641b533d1e7", "grade": false, "grade_id": "cell-476563a7f6cd7781", "locked": true, "schema_version": 3, "solution": false, "task": false} # # Kepler's Third Law # # Download the file called `exoplanets.csv`. It contains orbital and physical parameters for 2950 exoplanets. (It will look like a spreadsheet if you open it with JupyterLab, but it's really just a text file.) # + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "c68bf9686761d4a19dd1e1ba5f624ace", "grade": false, "grade_id": "cell-ad6652c0fc5ace83", "locked": true, "schema_version": 3, "solution": false, "task": false} # This code downloads the file from the Internet and saves it in the working directory import os import urllib.request as url if not os.path.isfile("exoplanets.csv"): url.urlretrieve("http://www.pitt.edu/~djn23/phys1321/exoplanets.csv", "exoplanets.csv") # - # Load the data from this file. You only need the following three columns: # 1. MSTAR (the mass of the planet's star) # 2. A (the semi-major axis of the planet's orbit) # 3. PER (the period of the planet's orbit) # # > Be aware that there is missing data in the file, which is annoying to deal with using `np.loadtxt()`. Instead, I recommend using `np.genfromtxt()`. It has a lot of optional arguments, so read the documentation for it. # > # > Missing data should be replaced by `np.nan` (not a number). `genfromtxt` does this by default. You want to do this, because `matplotlib` automatically ignores `nan` when plotting. You will see a pink warning message that says `RuntimeWarning: invalid value encountered in...` That's just a warning about the presence of `nan` in your arrays, so in this case, you can safely ignore it. If it really bothers you, run the command `np.seterr(invalid="ignore")` at the beginning of your program. # # Once you have the data loaded into arrays, your next task is to verify Kepler's 3rd Law, which says that # $$T^2\propto\frac{a^3}{M}\,,$$ # where $T$ is the period of the orbit, $a$ is the semimajor axis, and $M$ is the mass of the *star*. The proportionality constant is equal to 1 if the units of period are in years, the semi-major axis is in AU (astronomical units), and the mass of the star is in units of solar masses. (In other words, watch your units.) 
#
# Make a scatter plot of $a^3/M$ versus $T^2$ for values of $T$ **less than 20 years**. Use a color **and** symbol other than the defaults.
#
# On top of the scatter plot, plot a line with slope 1 and y-intercept 0 (i.e., plot the expected relationship as a line). Again, use a linestyle **and** color other than the defaults. Add a legend to your plot, and label the $x$- and $y$-axes appropriately.
#
# > The line and data should line up well. If they don't, you did something wrong.

# + deletable=false nbgrader={"cell_type": "code", "checksum": "8e1d1dac5a0884f64a3fb0f835f5ad49", "grade": true, "grade_id": "cell-e5dce29f90dca8dc", "locked": false, "points": 20, "schema_version": 3, "solution": true, "task": false}
# %%graded
# 20 points

# YOUR CODE HERE
Assignments/03.5 Checkpoint - Kepler's Third Law.ipynb
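Since the graded cell in the checkpoint above is left as `# YOUR CODE HERE`, the following is only an illustrative sketch of one way to approach it, not the assignment's reference solution. It assumes the CSV exposes columns named `MSTAR`, `A`, and `PER` as listed in the instructions, and that the period column is reported in days (hence the division by 365.25, following the "watch your units" hint); both assumptions should be checked against the actual file, and the plot colors and markers are arbitrary choices.

```python
import numpy as np
import matplotlib.pyplot as plt

# genfromtxt fills missing entries with nan, which matplotlib silently skips
data = np.genfromtxt("exoplanets.csv", delimiter=",", names=True)
mstar = data["MSTAR"]          # stellar mass, solar masses (assumed)
a = data["A"]                  # semi-major axis, AU (assumed)
T = data["PER"] / 365.25       # period, assumed given in days -> years

mask = T < 20                  # keep orbits with period under 20 years

plt.scatter(T[mask] ** 2, a[mask] ** 3 / mstar[mask],
            marker="^", color="purple", label="exoplanets")

# Expected relationship: slope 1, intercept 0
x = np.linspace(0, 400, 2)
plt.plot(x, x, "--", color="green", label=r"$T^2 = a^3/M$")

plt.xlabel(r"$T^2$ (yr$^2$)")
plt.ylabel(r"$a^3/M$ (AU$^3$/M$_\odot$)")
plt.legend()
plt.show()
```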
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + import numpy as np import matplotlib.pyplot as plt plt.rcParams.update({ "text.usetex": True, "font.family": "sans-serif", "font.sans-serif": ["Helvetica"]}) # for Palatino and other serif fonts use: plt.rcParams.update({ "text.usetex": True, "font.family": "serif", "font.serif": ["Palatino"], }) # %matplotlib inline import pymesh #https://pymesh.readthedocs.io/en/latest/basic.html import time import multiprocessing import meshplot import itertools from multiprocessing import Pool # Process pool from multiprocessing import sharedctypes plt.rcParams['xtick.labelsize'] = 14 plt.rcParams['ytick.labelsize'] = 14 from mshmthds import * from BYORP_YORP import * import sys # - # # The surface thermal inertia is neglected, so that thermal radiation is re-emitted with no time lag, and the reflected and thermally radiated components are assumed Lambertian (isotropic) and so emitted with flux # parallel to the local surface normal. We ignore heat conduction. The surface is described with a closed # triangular mesh. # # # The radiation force from the $i$-th facet is # $$ {\bf F}_i = - \frac{F_\odot}{c} {S_i} (\hat {\bf n}_i \cdot \hat {\bf s}_\odot) \hat {\bf n}_i $$ # where $S_i$ is the area of the $i$-th facet and $\hat {\bf n}_i$ is its surface normal. # Here $F_\odot$ is the solar radiation flux and $c$ is the speed of light. # The direction of the Sun is $\hat {\bf s}_\odot$. # # The total Yarkovsky force is a sum over all the facets # $${\bf F}_Y = \sum_{i: \hat {\bf n}_i \cdot \hat {\bf s}_\odot >0} {\bf F}_i $$ # Only facets on the day side or with $\hat {\bf n}_i \cdot \hat {\bf s}_\odot >0$ # are included in the sum. # # The torque affecting the binary orbit from a single facet is # $$ {\boldsymbol \tau}_{i,B} = # \begin{cases} # - \frac{F_\odot}{c} {S_i} (\hat {\bf n}_i \cdot \hat {\bf s}_\odot) ( {\bf a}_B \times \hat {\bf n}_i) # & \mbox{if } \hat {\bf n}_i \cdot \hat {\bf s}_\odot >0 \\ # 0 & \mbox{otherwise} # \end{cases} # $$ # where ${\bf a}_B$ is the secondary's radial vector from the binary center of mass. # # # The torque affecting the binary orbit is the sum of the torques from each facet and should be an average # over the orbit around the Sun and # over the binary orbit and spin of the secondary. # $$ {\boldsymbol \tau}_{BY} = \frac{1}{T} \int_0^T dt\ \sum_{i: \hat {\bf n}_i \cdot \hat {\bf s}_\odot >0} # {\boldsymbol \tau}_{i,B} $$ # # # If $\hat {\bf l}$ is the binary orbit normal then # $$ {\boldsymbol \tau}_{BY} \cdot \hat {\bf l} $$ # changes the binary's orbital angular momentum and causes binary orbit migration. # # # The torque affecting the spin (also known as YORP) instantaneously depends on # the radii of each facit ${\bf r}_i$ from the asteroid center of mass # $$ {\boldsymbol \tau}_{i,s} = \begin{cases} # - \frac{F_\odot}{c} {S_i} (\hat {\bf n}_i \cdot \hat {\bf s}_\odot) ({\bf r}_i \times \hat{\bf n}_i) # & \mbox{if } \hat {\bf n}_i \cdot \hat {\bf s}_\odot >0 \\ # 0 & \mbox{otherwise} # \end{cases}$$ # # # $$ {\boldsymbol \tau}_Y = \frac{1}{T} \int_0^T dt \ \sum_{i: \hat {\bf n}_i \cdot \hat {\bf s}_\odot >0} {\boldsymbol \tau}_{i,s} $$ # where the average is done over the orbit about the Sun and the spin of the asteroid. 
# If the spin axis is $\hat {\boldsymbol \omega}$ then # $$ {\boldsymbol \tau}_Y \cdot \hat {\boldsymbol \omega} $$ gives the body spin up or spin down rate. # # # In practice we average over the Sun's directions first and then average over spin (for YORP) or and spin and binary orbit direction (for BYORP) afterward. # # # <b> Units </b> # # For our calculation are $F_\odot/c = 1$. # # For YORP $R=1$. # For BYORP $a_B = 1$ and $R=1$ (in the surface area). # # Here $R$ is volume equivalent sphere radius. # # To put in physical units: # # Multiply ${\boldsymbol \tau}_Y$ by $\frac{F_\odot R^3}{c}$. # # Multiply ${\boldsymbol \tau}_{BY}$ by $\frac{F_\odot R^2 a_B}{c}$. # # Alternatively we are computing: # # ${\boldsymbol \tau}_Y \times \frac{c}{F_\odot R^3} $ # # ${\boldsymbol \tau}_{BY} \times \frac{c}{F_\odot R^2 a_B} $ # # # To get the rate the spin changes for YORP # # $\dot \omega = \frac{ {\boldsymbol \tau}_Y \cdot \hat {\bf s} }{C} $ # # where $C$ is the moment of inertia about the spin axis. # # To order of magnitude what we are computing can be multiplied by # $\frac{F_\odot R^3}{c MR^2} $ to estimate $\dot \omega$ # and by $\frac{F_\odot R^3}{c MR^2 \omega} $ # to estimate $\dot \epsilon$. # # To get the rate that obliquity changes for YORP # # $\dot \epsilon = \frac{ {\boldsymbol \tau}_Y \cdot \hat {\boldsymbol \phi} }{C \omega} $ # # where unit vector $\hat {\boldsymbol \phi}$ is in the xy plane (ecliptic) and is perpendicular to the spin axis. # # To get the semi-major axis drift rate for BYORP # # $ \dot a_B = \frac{2 {\boldsymbol \tau}_{BY} \cdot \hat {\bf l}}{M n_Ba_B} $ # # where $M$ is the secondary mass, $n_B$ and $a_B$ are binary orbit mean motion and semi-major axis. # # To order of magnitude to get the drift rate we multiply what we are getting by # $\frac{F_\odot R^2 a_B}{c} \times \frac{1}{M n_B a_B}$. # # # Dimensionless numbers used by Steiberg+10 (eqns 19,48) # # $f_{Y} \equiv \tau_{Y} \frac{3}{2} \frac{c}{\pi R^3 F_\odot}$ # # $f_{BY} \equiv \tau_{BY} \frac{3}{2} \frac{c}{\pi R^2 a_B F_\odot}$ # # Our computed values are the same as theirs except for a factor of 3/2 # (but they have a 2/3 in their torque) and a factor of $\pi$. # We need to divide by $\pi$ to have values consistent with theirs. # # <b> Assumptions:</b> # # Circular orbit for binary. # # Circuilar orbit for binary around Sun. # # No shadows. # # No conduction. Lambertian isotropic emission. No thermal lag. # # We neglect distance of facet centroids from secondary center of mass when computing BYORP. # # Coordinate system: # binary orbit is kept in xy plane # # Compare YORP on primary to BYORP on secondary. # # $\frac{\tau_{Yp}}{\tau_{BY} }\sim \frac{R_p^2 }{R_s^2 } \frac{R_p }{a_B }\frac{f_Y}{ f_{BY}}$ # # For Didymos, this is about $8 f_Y/f_{BY}$. 
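The notebook imports `compute_Y` and `compute_BY` from `BYORP_YORP`, whose internals are not shown here. The sketch below implements the per-facet sums defined in the equations above for a single Sun direction, assuming a mesh whose `face_area`, `face_normal`, and `face_centroid` attributes have already been extracted into NumPy arrays, and using $F_\odot/c = 1$ as in the notebook's units.

```python
import numpy as np

def facet_torques(face_areas, face_normals, face_centroids, s_hat, a_B):
    """Instantaneous YORP and BYORP torques from the per-facet sums above.

    face_areas     : (N,)   facet areas S_i
    face_normals   : (N, 3) unit outward normals n_i
    face_centroids : (N, 3) facet positions r_i from the body center of mass
    s_hat          : (3,)   unit vector toward the Sun
    a_B            : (3,)   secondary's radial vector from the binary barycenter
    """
    mu = face_normals @ s_hat          # n_i . s_hat
    day = mu > 0                       # only day-side facets contribute

    # Radiation force on each illuminated facet: F_i = -S_i (n_i . s) n_i
    F = -(face_areas[day] * mu[day])[:, None] * face_normals[day]

    tau_Y = np.sum(np.cross(face_centroids[day], F), axis=0)   # spin (YORP) torque
    tau_BY = np.sum(np.cross(a_B, F), axis=0)                  # binary-orbit (BYORP) torque
    return tau_Y, tau_BY
```

A full calculation still averages these instantaneous torques over the Sun direction, the spin phase, and (for BYORP) the binary orbit phase, which is presumably what `compute_Y` and `compute_BY` do internally.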
squannit = pymesh.load_mesh("kw4b.obj") SIZEOFMESH = 0.03 short_squannit, info = pymesh.collapse_short_edges(squannit, SIZEOFMESH) folder = 'Squannit'+ str(len(short_squannit.faces)) print(folder) # + # compute the BYORP torque on body as a function of obliquity # for a given inclination and precession angle # returns obliquity and torque arrays #Create the mesh squannit = pymesh.load_mesh("kw4b.obj") short_squannit, info = pymesh.collapse_short_edges(squannit, SIZEOFMESH) vertices = short_squannit.vertices faces = short_squannit.faces #Simulation Parameters size = 20 # Number of Obliquities block_size = 1 # Obliquities per subprocess incl = 0; phi_prec=0 tau_s_arr = np.ctypeslib.as_ctypes(np.zeros((size))) shared_array_tau_s = sharedctypes.RawArray(tau_s_arr._type_, tau_s_arr) tau_o_arr = np.ctypeslib.as_ctypes(np.zeros((size))) shared_array_tau_o = sharedctypes.RawArray(tau_o_arr._type_, tau_o_arr) o_arr = np.ctypeslib.as_ctypes(np.zeros((size))) shared_array_o = sharedctypes.RawArray(o_arr._type_, o_arr) # YORP Methods # compute the YORP torque on body as a function of obliquity # here obliquity is w.r.t Sun # returns obliquity and torque arrays def aj_alt_obliq_Y_fig(nobliq): body = pymesh.form_mesh(vertices, faces) body.add_attribute("face_area") body.add_attribute("face_normal") nphi_Sun=36 # number of solar positions nphi = 36 # number of spin positions # nobliq = 20 # number of obliquities dobliq = np.pi/20 tau_s_arr = np.ctypeslib.as_array(shared_array_tau_s) # to store torques tau_o_arr = np.ctypeslib.as_array(shared_array_tau_o) # to store torques o_arr = np.ctypeslib.as_array(shared_array_o) # to store obliquities in degrees print(f'Starting {nobliq}') for i in range(nobliq, nobliq+block_size): obliquity=i*dobliq tau_Y_x,tau_Y_y,tau_Y_z,tau_s,tau_o =compute_Y(body,obliquity,nphi,nphi_Sun) #print(tau_s) tau_s_arr[i] = tau_s tau_o_arr[i] = tau_o o_arr[i] = obliquity*180/np.pi print(f'Finished {nobliq}') return o_arr, tau_s_arr, tau_o_arr start=time.perf_counter() p=Pool() # compute YORPs as a function of obliquity (single body, obliquity w.r.t Solar orbit) print('Starting Analysis') res = p.map(aj_alt_obliq_Y_fig, range(size)) multi_o_arr = np.ctypeslib.as_array(shared_array_o) multi_tau_arr_s = np.ctypeslib.as_array(shared_array_tau_s) multi_tau_arr_o = np.ctypeslib.as_array(shared_array_tau_o) end = time.perf_counter() print(f'Time to complete {round(end - start,2)} second(s)') # o_arr, tau_s_arr, tau_o_arr = obliq_Y_fig(body) # also check the sphere for YORP # o_arr2, tau_s_arr2,tau_o_arr2 = obliq_Y_fig(sphere) # note y axis # compare the two YORPs fig,ax = plt.subplots(1,1,figsize=(5,4),dpi=150) # ax.plot(o_arr2,tau_s_arr2,'go-',label='sphere') #ax.plot(o_arr2,tau_o_arr2,'bo-',label='sphere') ax.plot(multi_o_arr,multi_tau_arr_s,'rD-',label=r'body, $s$') ax.plot(multi_o_arr,multi_tau_arr_o,'D:',label='body, $o$', color='orange') ax.set_xlabel('obliquity (deg)',fontsize=16) ax.set_ylabel(r'${ \tau}_Y \cdot \hat{ s}, { \tau}_Y \cdot \hat{\phi}$',fontsize=16) ax.legend() fig.savefig(folder+'/ObliqY') # + # compute the BYORP torque on body as a function of inclination # for a given obliquity and precession angle # returns inclination and torque arrays #Create the mesh squannit = pymesh.load_mesh("kw4b.obj") short_squannit, info = pymesh.collapse_short_edges(squannit, SIZEOFMESH) vertices = short_squannit.vertices faces = short_squannit.faces #Simulation Parameters size = 20 # Number of Inclinations block_size = 1 # Obliquities per subprocess obliquity = 0; phi_prec=0 tau_l_arr = 
np.ctypeslib.as_ctypes(np.zeros((size))) shared_array_tau = sharedctypes.RawArray(tau_l_arr._type_, tau_l_arr) i_arr = np.ctypeslib.as_ctypes(np.zeros((size))) shared_array_i = sharedctypes.RawArray(i_arr._type_, i_arr) def aj_alt_obliq_BY_fig(nincl): body = pymesh.form_mesh(vertices, faces) body.add_attribute("face_area") body.add_attribute("face_normal") #mesh.add_attribute("vertex_normal") body.add_attribute("face_centroid") f_area = body.get_attribute("face_area") phi0=0 nphi_Sun=36 # number of solar positions nphi = 36 # number of spin positions dincl = np.pi/size tau_l_arr = np.ctypeslib.as_array(shared_array_tau) # to store torques i_arr = np.ctypeslib.as_array(shared_array_i) print(f'Started {nincl}') for i in range(nincl, nincl+block_size): incl=i*dincl tau_BY_x,tau_BY_y,tau_BY_z, tau_l =compute_BY(body,obliquity,nphi,nphi_Sun,incl,phi0,phi_prec) i_arr[i] = incl*180/np.pi tau_l_arr[i] = tau_l print(f'Finished {nincl}') return i_arr,tau_l_arr # compute BYORPs as a function of inclination # i_arr,tau_l_arr = obliq_BY_fig(body,obliquity,phi_prec) start = time.perf_counter() p = Pool() #Complete BYORP analysis print('Starting analysis') res = p.map(aj_alt_obliq_BY_fig, range(20)) multi_i_arr = np.ctypeslib.as_array(shared_array_o) multi_tau_l_arr = np.ctypeslib.as_array(shared_array_tau) end = time.perf_counter()#Print Time print(f'D8: time to complete {round(end - start,2)} second(s)') fig,ax = plt.subplots(1,1,figsize=(5,4),dpi=150) ax.plot(multi_i_arr,multi_tau_l_arr,'rD-',label='body') ax.set_xlabel('inclination (deg)',fontsize=16) ax.set_ylabel(r'${\tau}_{BY} \cdot \hat{l}$',fontsize=16) ax.legend() fig.savefig(folder+'/InclBY') # + # compute the BYORP torque on body as a function of obliquity # for a given inclination and precession angle # returns obliquity and torque arrays #Create the mesh squannit = pymesh.load_mesh("kw4b.obj") short_squannit, info = pymesh.collapse_short_edges(squannit, SIZEOFMESH) vertices = short_squannit.vertices faces = short_squannit.faces #Simulation Parameters size = 60 # Number of Obliquities block_size = 1 # Obliquities per subprocess incl = 0; phi_prec=0 tau_l_arr = np.ctypeslib.as_ctypes(np.zeros((60))) shared_array_tau = sharedctypes.RawArray(tau_l_arr._type_, tau_l_arr) o_arr = np.ctypeslib.as_ctypes(np.zeros((60))) shared_array_o = sharedctypes.RawArray(o_arr._type_, o_arr) def aj_alt_obliq_BY_fig2(nobliq): body = pymesh.form_mesh(vertices, faces) body.add_attribute("face_area") body.add_attribute("face_normal") #mesh.add_attribute("vertex_normal") body.add_attribute("face_centroid") f_area = body.get_attribute("face_area") phi0=0 nphi_Sun=36 # number of solar positions nphi = 36 # number of spin positions dobliq = np.pi/60 tau_l_arr = np.ctypeslib.as_array(shared_array_tau) # to store torques o_arr = np.ctypeslib.as_array(shared_array_o) for i in range(nobliq, nobliq+block_size): obliquity=i*dobliq tau_BY_x,tau_BY_y,tau_BY_z, tau_l = compute_BY(body,obliquity,nphi,nphi_Sun,incl,phi0,phi_prec) o_arr[i] = obliquity*180/np.pi tau_l_arr[i] = tau_l print(f'Finished {nobliq}') return o_arr,tau_l_arr start = time.perf_counter() p = Pool() #Complete BYORP analysis print('Starting analysis') res = p.map(aj_alt_obliq_BY_fig2, range(60)) multi_o_arr = np.ctypeslib.as_array(shared_array_o) multi_tau_l_arr = np.ctypeslib.as_array(shared_array_tau) end = time.perf_counter()#Print Time print(f'D8: time to complete {round(end - start,2)} second(s)') fig,ax = plt.subplots(1,1,figsize=(5,4),dpi=300) ax.plot(multi_o_arr,multi_tau_l_arr,'go-',label='sphere') 
ax.plot(multi_o_arr,multi_tau_l_arr,'rD-',label='body') ax.set_xlabel('obliquity (deg)',fontsize=16) ax.set_ylabel(r'${ \tau}_{BY} \cdot \hat{l}$',fontsize=16) ax.legend() fig.savefig(folder+'/ObliqBY') # + # compute the BYORP torque on body as a function of precession angle # for a given obliquity and inclination # returns precession angle and torque arrays #Create the mesh squannit = pymesh.load_mesh("kw4b.obj") short_squannit, info = pymesh.collapse_short_edges(squannit, SIZEOFMESH) vertices = short_squannit.vertices faces = short_squannit.faces #Simulation Parameters size = 30 # Number of Precession Angles block_size = 1 # Precession < per subprocess incl = 0; obliquity=np.pi/4 tau_l_arr = np.ctypeslib.as_ctypes(np.zeros((size))) shared_array_tau = sharedctypes.RawArray(tau_l_arr._type_, tau_l_arr) p_arr = np.ctypeslib.as_ctypes(np.zeros((size))) shared_array_p = sharedctypes.RawArray(p_arr._type_, p_arr) def aj_alt_obliq_BY_fig3(nprec): body = pymesh.form_mesh(vertices, faces) body.add_attribute("face_area") body.add_attribute("face_normal") #mesh.add_attribute("vertex_normal") body.add_attribute("face_centroid") f_area = body.get_attribute("face_area") phi0=0 nphi_Sun=36 # number of solar positions nphi = 36 # number of spin positions dprec = np.pi/size # only goes from 0 to pi tau_l_arr = np.ctypeslib.as_array(shared_array_tau) # to store torques p_arr = np.ctypeslib.as_array(shared_array_p) print(f'Starting {nprec}') for i in range(nprec, nprec+block_size): phi_prec=i*dprec tau_BY_x,tau_BY_y,tau_BY_z, tau_l =compute_BY(body,obliquity,nphi,nphi_Sun,incl,phi0,phi_prec) p_arr[i] = phi_prec*180/np.pi tau_l_arr[i] = tau_l print(f'Finished {nprec}') return p_arr,tau_l_arr # compute BYORPs as a function of precession angle, seems not sensitive to precession angle # p_arr,tau_l_arr = obliq_BY_fig3(body,obliquity,incl) start = time.perf_counter() p = Pool() #Complete BYORP analysis print('Starting analysis') res = p.map(aj_alt_obliq_BY_fig3, range(size)) multi_p_arr = np.ctypeslib.as_array(shared_array_p) multi_tau_l_arr = np.ctypeslib.as_array(shared_array_tau) end = time.perf_counter()#Print Time print(f'D8: time to complete {round(end - start,2)} second(s)') fig,ax = plt.subplots(1,1,figsize=(5,4),dpi=150) ax.plot(multi_p_arr,multi_tau_l_arr,'rD-',label='body') ax.set_xlabel('precession angle (deg)',fontsize=16) ax.set_ylabel(r'${ \tau}_{BY} \cdot \hat{l}$',fontsize=16) ax.legend() fig.savefig(folder+'/PrecessBY') # + # compute the BYORP torque on body as a function of libration angle phi0 # for a given obliquity and inclination and precession angle # returns libration angle and torque arrays #Create the mesh squannit = pymesh.load_mesh("kw4b.obj") short_squannit, info = pymesh.collapse_short_edges(squannit, SIZEOFMESH) vertices = short_squannit.vertices faces = short_squannit.faces #Simulation Parameters size = 20 # Number of Libration Angles block_size = 1 # Precession < per subprocess incl = 0; phi_prec=0; obliquity = np.pi/4 tau_l_arr = np.ctypeslib.as_ctypes(np.zeros((size))) shared_array_tau = sharedctypes.RawArray(tau_l_arr._type_, tau_l_arr) l_arr = np.ctypeslib.as_ctypes(np.zeros((size))) shared_array_l = sharedctypes.RawArray(l_arr._type_, l_arr) def aj_alt_obliq_BY_fig4(nlib): body = pymesh.form_mesh(vertices, faces) body.add_attribute("face_area") body.add_attribute("face_normal") #mesh.add_attribute("vertex_normal") body.add_attribute("face_centroid") f_area = body.get_attribute("face_area") phi0=0 nphi_Sun=36 # number of solar positions nphi = 36 # number of spin positions 
dlib = 0.5*np.pi/size # going from -pi/4 to pi/4 tau_l_arr = np.ctypeslib.as_array(shared_array_tau) # to store torques l_arr = np.ctypeslib.as_array(shared_array_l) print(f'Staring {nlib}') for i in range(nlib, nlib + block_size): phi0=i*dlib - np.pi/4 tau_BY_x,tau_BY_y,tau_BY_z, tau_l =compute_BY(body,obliquity,nphi,nphi_Sun,incl,phi0,phi_prec) l_arr[i] = phi0*180/np.pi tau_l_arr[i] = tau_l print(f'Finished {nlib}') return l_arr,tau_l_arr # compute BYORPs as a function of libration angle # l_arr,tau_l_arr=obliq_BY_fig4(body,obliquity,incl,phi_prec) #plt.savefig('tau_BY_lib.png') # fairly sensitive to libration angle start = time.perf_counter() p = Pool() #Complete BYORP analysis print('Starting analysis') res = p.map(aj_alt_obliq_BY_fig4, range(size)) multi_l_arr = np.ctypeslib.as_array(shared_array_l) multi_tau_l_arr = np.ctypeslib.as_array(shared_array_tau) end = time.perf_counter()#Print Time print(f'D8: time to complete {round(end - start,2)} second(s)') fig,ax = plt.subplots(1,1,figsize=(5,4),dpi=150) #ax.plot(o_arr2,tau_l_arr2,'go-',label='sphere') ax.plot(multi_l_arr,multi_tau_l_arr,'rD-',label='body') ax.set_xlabel('libration angle (deg)',fontsize=16) ax.set_ylabel(r'${ \tau}_{BY} \cdot \hat{l}$',fontsize=16) ax.legend() fig.savefig(folder+'/LibBY') # -
myexamples/pylab/BYORP4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import os import torch import torchtext from torchtext.datasets import translation, imdb, language_modeling, nli from torchtext.datasets import sequence_tagging, unsupervised_learning, text_classification, sst import pytorch_lightning as pl import torch.nn as nn import torch.nn.functional as F from torch.utils.data import random_split, DataLoader, Dataset # - def generate_batch(batch): label = torch.tensor([entry[0] for entry in batch]) text = [entry[1] for entry in batch] offsets = [0] + [len(entry) for entry in text] offsets = torch.tensor(offsets[:-1]).cumsum(dim=0) text = torch.cat(text) return text, offsets, label device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # + DATA_ROOT_PATH = '../data/text_classification/' NGRAMS = 2 VOCAB = None BATCH_SIZE = 16 EMBED_DIM = 32 # - train_dataset, test_dataset = text_classification.SogouNews(root=DATA_ROOT_PATH, ngrams=NGRAMS, vocab=VOCAB) VOCAB_SIZE = len(train_dataset.get_vocab()) NUN_CLASS = len(train_dataset.get_labels()) # + train_len = int(len(train_dataset) * 0.95) sub_train_, sub_valid_ = random_split(train_dataset, [train_len, len(train_dataset) - train_len]) train_loader = DataLoader(sub_train_, batch_size=BATCH_SIZE, shuffle=True, collate_fn=generate_batch, num_workers=8) valid_loader = DataLoader(sub_valid_, batch_size=BATCH_SIZE, shuffle=False, collate_fn=generate_batch, num_workers=8) # + lengths, text = train_dataset[10] text.shape # - texts, offsets, cls = next(iter(train_loader)) vocab = train_dataset.get_vocab() print([vocab.itos[token] for token in texts.tolist()]) texts.shape, offsets.shape, cls.shape class TextSentiment(nn.Module): def __init__(self, vocab_size, embed_dim, num_class): super().__init__() self.embedding = nn.EmbeddingBag(vocab_size, embed_dim, sparse=True) self.fc = nn.Linear(embed_dim, num_class) self.init_weights() def init_weights(self): initrange = 0.5 self.embedding.weight.data.uniform_(-initrange, initrange) self.fc.weight.data.uniform_(-initrange, initrange) self.fc.bias.data.zero_() def forward(self, text, offsets): embedded = self.embedding(text, offsets) return self.fc(embedded) # + from pytorch_lightning.metrics import Accuracy class TaskTextSentiment(pl.LightningModule): def __init__(self, model, optimizers, criterion, scheduler=None): super().__init__() self.model = model self.optimizer = optimizer self.criterion = criterion self.scheduler = scheduler self.metric = Accuracy() def shared_step(self, batch, batch_idx): text, offsets, cls = batch output = self.model(text, offsets) loss = self.criterion(output, cls) # acc = (output.argmax(1) == cls).sum().item() acc = self.metric(output.argmax(1), cls) return loss, acc def training_step(self, batch, batch_idx): loss, acc = self.shared_step(batch, batch_idx) result = pl.TrainResult(loss) # result.log_dict({'trn_loss': loss}) result.log_dict({'trn_loss': loss, 'trn_acc':acc}) return result def validation_step(self, batch, batch_idx): loss, acc = self.shared_step(batch, batch_idx) result = pl.EvalResult(checkpoint_on=loss) # result.log_dict({'val_loss': loss}) result.log_dict({'val_loss': loss, 'val_acc': acc}) return result def configure_optimizers(self): if self.scheduler: return [self.optimizer], [self.scheduler] return self.optimizer # - model = TextSentiment(VOCAB_SIZE, EMBED_DIM, NUN_CLASS) # model.to(device) 
criterion = torch.nn.CrossEntropyLoss() optimizer = torch.optim.SGD(model.parameters(), lr=4.0) scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.9) # + from pytorch_lightning import loggers as pl_loggers tb_logger = pl_loggers.TensorBoardLogger('logs/sogoue') task = TaskTextSentiment(model, optimizer, criterion, scheduler) trainer = pl.Trainer(gpus=1, logger=tb_logger) trainer.fit(task, train_loader, valid_loader) # - # + from torch.utils.data import DataLoader def train_func(sub_train_): # Train the model train_loss = 0 train_acc = 0 data = DataLoader(sub_train_, batch_size=BATCH_SIZE, shuffle=True, collate_fn=generate_batch) for i, (text, offsets, cls) in enumerate(data): optimizer.zero_grad() text, offsets, cls = text.to(device), offsets.to(device), cls.to(device) output = model(text, offsets) loss = criterion(output, cls) train_loss += loss.item() loss.backward() optimizer.step() train_acc += (output.argmax(1) == cls).sum().item() # Adjust the learning rate scheduler.step() return train_loss / len(sub_train_), train_acc / len(sub_train_) def test(data_): loss = 0 acc = 0 data = DataLoader(data_, batch_size=BATCH_SIZE, collate_fn=generate_batch) for text, offsets, cls in data: text, offsets, cls = text.to(device), offsets.to(device), cls.to(device) with torch.no_grad(): output = model(text, offsets) loss = criterion(output, cls) loss += loss.item() acc += (output.argmax(1) == cls).sum().item() return loss / len(data_), acc / len(data_) # + import time from torch.utils.data.dataset import random_split N_EPOCHS = 5 min_valid_loss = float('inf') criterion = torch.nn.CrossEntropyLoss().to(device) optimizer = torch.optim.SGD(model.parameters(), lr=4.0) scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.9) train_len = int(len(train_dataset) * 0.95) sub_train_, sub_valid_ = random_split(train_dataset, [train_len, len(train_dataset) - train_len]) for epoch in range(N_EPOCHS): start_time = time.time() train_loss, train_acc = train_func(sub_train_) valid_loss, valid_acc = test(sub_valid_) secs = int(time.time() - start_time) mins = secs / 60 secs = secs % 60 print('Epoch: %d' %(epoch + 1), " | time in %d minutes, %d seconds" %(mins, secs)) print(f'\tLoss: {train_loss:.4f}(train)\t|\tAcc: {train_acc * 100:.1f}%(train)') print(f'\tLoss: {valid_loss:.4f}(valid)\t|\tAcc: {valid_acc * 100:.1f}%(valid)') # -
notebook/text_sentiment_sogoue.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import matplotlib.pyplot as plt

# +
path = 'producerNom.txt'
data = np.loadtxt(path, skiprows=2).T
dataNew = np.array([data[0], data[1]/np.max(data[1])*5000, data[2], data[3]])
np.savetxt('producerNomNew.txt', dataNew.T)

# +
# create a constant consumer profile (written to consumerConst.txt)
time = np.linspace(0, 31536000, num=int(31536000/3600)+1)
QFlow = 10000
data = np.full(len(time), QFlow)
np.savetxt('consumerConst.txt', np.array([time, data]).T)

# +
# NOTE: data1 and data2 are not defined in this notebook; they are expected from an earlier session
data3 = np.array([data1[0], data2[1], data1[3]+60, data1[3]])
np.shape(data3[3])
np.savetxt('producerNom_neu.txt', data3.T)
# -

plt.plot(dataNew[0]/3600/24, dataNew[1])
plt.show()

plt.plot(data3[0]/3600/24, data3[2])
plt.show()

plt.plot(data3[0]/3600/24, data3[3])
plt.show()

# +
# NOTE: this cell expects data to be the 2-D array loaded from producerNom.txt;
# the constant-profile cell above overwrites it with a 1-D array
dataLen = len(data[0])
dataTem = (np.sin(data[0]/3600/12)*5+ 273.15 + 10) - np.cos(data[0]/3600/24/(365/6))*5

plt.plot(data[0]/3600/24, dataTem)
plt.show()
# -

dataNew = np.array([data[0], data[1], data[2], dataTem])
np.savetxt('producerNom_neu.txt', dataNew.T)

plt.plot(dataNew[0], dataNew[2])
plt.show()
dhcSim/Resources/LoadProf/default/Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## 2. Feature_extraction_from_VGG16_to_InceptionResNetV2 # ### References: # 1. https://github.com/ypwhs/dogs_vs_cats # 2. https://www.kaggle.com/yangpeiwen/keras-inception-xception-0-47 # ### Import pkgs # + import h5py import os import time from keras.layers import * from keras.models import * from keras.applications import * from keras.optimizers import * from keras.regularizers import * from keras.preprocessing.image import * # + def get_features(MODEL, image_size, batch_size=1, lambda_func=None): print('{0} start.'.format(MODEL.__name__)) start_time = time.time() width = image_size[0] height = image_size[1] input_tensor = Input((height, width, 3)) x = input_tensor if lambda_func: print(lambda_func.__name__) x = Lambda(lambda_func)(x) base_model = MODEL(input_tensor=x, weights='imagenet', include_top=False) model = Model(base_model.input, GlobalAveragePooling2D()(base_model.output)) cwd = os.getcwd() data_train_path = os.path.join(cwd, 'input', 'data_train') data_val_path = os.path.join(cwd, 'input', 'data_validation') # data_test_a_path = os.path.join(cwd, 'input', 'data_test_a') data_test_b_path = os.path.join(cwd, 'input', 'data_test_b') gen = ImageDataGenerator() # gen = ImageDataGenerator(zoom_range = 0.1, # height_shift_range = 0.1, # width_shift_range = 0.1, # rotation_range = 10) train_generator = gen.flow_from_directory(data_train_path, image_size, shuffle=False, batch_size=batch_size) val_generator = gen.flow_from_directory(data_val_path, image_size, shuffle=False, batch_size=batch_size) # test_a_generator = gen.flow_from_directory(data_test_a_path, image_size, shuffle=False, # batch_size=batch_size) test_b_generator = gen.flow_from_directory(data_test_b_path, image_size, shuffle=False, batch_size=batch_size) # train = model.predict_generator(train_generator, verbose=1, steps=53879) # val = model.predict_generator(val_generator, verbose=1, steps=7120) # test = model.predict_generator(test_generator, verbose=1, steps=7040) train = model.predict_generator(train_generator, verbose=1, steps=10, max_queue_size=128, workers=16) val = model.predict_generator(val_generator, verbose=1, steps=10, max_queue_size=128, workers=16) # test_a = model.predict_generator(test_a_generator, verbose=1, steps=10, max_queue_size=128, workers=16) test_b = model.predict_generator(test_b_generator, verbose=1, steps=10, max_queue_size=128, workers=16) # print('filenames:' + str(len(val_generator.filenames))) # print(val_generator.filenames[0:10]) # print('filenames:' + str(len(test_generator.filenames))) # print(test_generator.filenames[0:10]) file_name = os.path.join(cwd, 'model', 'feature_{0}_{1}.h5'.format(MODEL.__name__, 171202)) print(file_name) if os.path.exists(file_name): os.remove(file_name) with h5py.File(file_name) as h: h.create_dataset("train", data=train) h.create_dataset("train_label", data=train_generator.classes) h.create_dataset("val", data=val) h.create_dataset("val_label", data=val_generator.classes) # h.create_dataset("test_a", data=test_a) h.create_dataset("test_b", data=test_b) # print(train.shape) # print(len(train_generator.classes)) # print(val.shape) # print(len(val_generator.classes)) # print(test.shape) # print(dir(train_generator)) # print(train_generator.samples) # print(train_generator.image_shape) # print(train_generator.classes) end_time = time.time() 
print('Spend time: {0} s'.format(end_time-start_time)) # - # %pdb off get_features(VGG16, (224, 224), 1) # + # get_features(VGG19, (224, 224), 1) # + # get_features(ResNet50, (224, 224), 1) # + # get_features(Xception, (299, 299), 1, xception.preprocess_input) # - get_features(InceptionV3, (299, 299), 1, inception_v3.preprocess_input) get_features(InceptionResNetV2, (299, 299), 1, inception_resnet_v2.preprocess_input) print('Done !')
SceneClassification2017/2. Feature_extraction_from_VGG16_to_InceptionResNetV2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Serbeld/Tensorflow/blob/master/PlaceHolders.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="lRwzKZHtIBDG" colab_type="text"
# **Networks Class**

# + [markdown] id="rTVvXG_rIJVh" colab_type="text"
# Neural networks in TensorFlow

# + id="19DeuBcoGcI5" colab_type="code" outputId="d060e8b8-39ab-4a3f-e818-af3140e7740f" colab={"base_uri": "https://localhost:8080/", "height": 35}
import tensorflow as tf
print(tf.__version__)

# + id="V1fgWVlcJPi5" colab_type="code" colab={}
p = tf.placeholder('float',None)

# + id="axmK5ZIPJe7s" colab_type="code" colab={}
operacion = p + 2

# + id="wQP2geubJj3I" colab_type="code" outputId="751b5ae9-eaf9-4824-d076-cc127f0e285a" colab={"base_uri": "https://localhost:8080/", "height": 35}
with tf.Session() as sess:
    #sess.run(operacion)
    resultado = sess.run(operacion, feed_dict = {p:[1,2,3]})
    print(resultado)

# + id="LfLIPMMoKpiB" colab_type="code" colab={}
p2 = tf.placeholder('float',[None,6])

# + id="Aslx8Uw5KyEb" colab_type="code" colab={}
operacion2 = p2*5

# + id="v1cmJkeJLFUc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 53} outputId="f0ff0658-de71-45d7-8fc8-e4e064e78f0d"
with tf.Session() as sess:
    datos = [[1,2,3,4,5,6],[7,8,9,10,11,12]]
    resultado2 = sess.run(operacion2, feed_dict = {p2:datos})
    print(resultado2)
Class/PlaceHolders.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Display an image using Napari

# ### Initial setup

# +
# %gui qt5
# Note that this magic command needs to be run in a cell
# before any of the Napari objects are instantiated to
# ensure it has time to finish executing before they are
# called

from skimage import data
from napari import ViewerApp
# -

# ### Display an image

viewer = ViewerApp(data.moon())
examples/notebook.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Dash Price
# by: <NAME>

# ### 1. Read Dataset

import csv
import pandas as pd
import numpy as np

df = pd.read_csv('../data/raw/bitcoin/dash_price.csv', parse_dates = ['Date'])
df.tail()

# ### 2. Data Investigation

df.columns

df.count()

df.dtypes

# #### There are no missing values, but the Volume and Market Cap columns are stored as text with thousands separators rather than as numbers.

# Convert the object columns to numeric
df['Volume'] = df['Volume'].apply(lambda x: float(str(x).replace(',','')))
df['Market Cap'] = df['Market Cap'].apply(lambda x: float(str(x).replace(',','')))

df.info()

df.isnull().sum()

df.tail()

# Check for missing data
missingdf = pd.DataFrame(df.isna().sum()).rename(columns = {0: 'total'})
missingdf['percent'] = missingdf['total'] / len(df)
missingdf

df.describe()

# #### The data is clean: there are no null values and all columns now share a consistent numeric format.

# ### 3. Data Visualization

# Set Date as the index
df.set_index('Date', inplace = True )

# +
# Visualize each series over time (a weekly-average variant for Open is sketched below)
import matplotlib.pyplot as plt
# %matplotlib inline

plt.figure(figsize=(25, 25))

plt.subplot(3,3,1)
plt.ylabel('Open')
df.Open.plot()
plt.title('Date vs Open')

plt.subplot(3,3,2)
plt.ylabel('Low')
df.Low.plot()
plt.title('Date vs Low')

plt.subplot(3,3,3)
plt.ylabel('High')
df.High.plot()
plt.title('Date vs High')

plt.subplot(3,3,4)
plt.ylabel('Close')
df.Close.plot()
plt.title('Date vs Close')

plt.subplot(3,3,5)
plt.ylabel('Volume')
df.Volume.plot()
plt.title('Date vs Volume')

plt.subplot(3,3,6)
plt.ylabel('Market Cap')
df['Market Cap'].plot()
plt.title('Date vs Market Cap')
# -
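# As referenced above, a minimal sketch of the weekly average of the Open price (an illustrative addition, assuming the Date index set earlier in this notebook):

# +
import matplotlib.pyplot as plt

# Weekly mean of the Open price via resampling on the Date index
weekly_open = df['Open'].resample('W').mean()

plt.figure(figsize=(12, 5))
weekly_open.plot()
plt.ylabel('Open (weekly mean)')
plt.title('Weekly average Open price')
plt.show()
# -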
notebooks/11. dash_price.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: cv-homework # language: python # name: cv-homework # --- # # Excercise 2 # ## Import packages # + import numpy as np import scipy.ndimage import matplotlib.pyplot as plt import skimage.io import skimage.color import skimage.exposure import time import math import random # - # ## Task 1 # (2 points) # 1. Use $f = loss(100)$ which creates a 1D array that mimics a loss curve of some neural network. # 2. Implement the naive $\mathcal{O}(n w)$ mean filter to smooth the loss. # 3. Implement the improved $\mathcal{O}(n)$ mean filter to smooth the loss. # 4. Show all three curves in a single plot with a legend. # 5. Compare the runtime between the naive and improved implementation. # + # 1. def loss(n): return 1.0 / (0.01 * (np.arange(0, n) + 1)) + 3.0 * np.random.uniform(-1.00, 1.00, n) # TODO: generate f with loss helper function f = loss(100) # 2. def naive_running_mean(f, w): ''' Apply the naive running mean filter and return the smoothed values. f -- contains the values w -- window size ''' g = np.zeros(f.shape) for x in range(w, len(f) - w): sum = 0 for xp in range(x-w, x+w+1): sum += f[xp] g[x] = sum / (2 * w + 1) return g # 3. def improved_running_mean(f, w): ''' Apply the improved running mean filter and return the smoothed values. f -- contains the values w -- window size ''' f_summed = np.zeros(f.shape) f_summed[0] = f[0] for i in range(1, len(f)): f_summed[i] = f_summed[i-1] + f[i] g = np.zeros(f.shape) g[w] = f_summed[2*w] / (2 * w + 1) for x in range(w + 1, len(f) - w): g[x] = (f_summed[x+w] - f_summed[x-w-1]) / (2 * w + 1) return g # 4. ws = 5 # TODO: Plot all three curves in a single plot g_naive = naive_running_mean(f, ws) g_improved = improved_running_mean(f, ws) plt.plot(f, label="original f") plt.plot(g_naive, label="naive running mean") plt.plot(g_improved, label = "improved running mean") plt.legend() plt.show() # 5. n = 1000000 f = loss(n) # TODO: Measure and print the runtime of both methods in Milliseconds print("Measuring running time for " + str(n) + " data points") start = time.time() naive_running_mean(f, ws) end = time.time() print("Running time of naive running mean: " + str(end - start)) start = time.time() improved_running_mean(f, ws) end = time.time() print("Running time of impoved running mean: " + str(end - start)) # - # ## Task 2 # (4 points) # 1. Create numpy arrays for the horizontal and vertical Sobel kernel. # $$ # S_x =\begin{bmatrix} # -1 & -2 & -1 \\ # 0 & 0 & 0 \\ # 1 & 2 & 1 # \end{bmatrix} # $$ # # $$ # S_y =\begin{bmatrix} # -1 & 0 & 1 \\ # -2 & 0 & 2 \\ # -1 & 0 & 1 # \end{bmatrix} # $$ # # 2. Implement the $\textit{convolve}$ function with four nested loops. # 3. Load the image from the last exercise and detect its edges using the Sobel kernels and the $\textit{convolve}$ function. # 4. Implement the 1D $\textit{convolve_fast}$ function. Apply the seperated Sobel operators $u$ and $v$. Plot the result together with $\textit{convolve(S_x)}$ and compare the runtime. # 5. Implement a function that seperates a 2D kernel into 2 1D kernels. # 6. (Bonus) Use the function for a gaussian blur kernel and apply it on the image. 
# + # 1 # TODO Create S_x and S_y S_x = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]) S_y = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]]) print('Horizontal Sobel kernel:\n', S_x) print('Vertical Sobel kernel:\n', S_y) # - # 2 def convolve(arr, ker): ''' Convolve the array using kernel K with four nested for loops. arr -- 2D array that gets convolved ker -- kernel ''' w = math.floor(ker.shape[0] / 2) arr_conv = np.zeros(arr.shape) for y in range(w, arr.shape[1] - w): for x in range(w, arr.shape[0] - w): conv_sum = 0 for y_ker in range(-w, w + 1): for x_ker in range(-w, w + 1): conv_sum += arr[x + x_ker, y + y_ker] * ker[x_ker + w, y_ker + w] arr_conv[x, y] = conv_sum return arr_conv # + # 3 # TODO: load ./data/pepo.jpg as a grayscale image image = skimage.io.imread("./data/pepo.jpg") image = skimage.color.rgb2gray(image) # TODO: Use the Sobel filter for edge detection: # Compute the Gradient Magnitude using both Sobel kernels edges_x = convolve(image, S_x) edges_y = convolve(image, S_y) edges = edges_x + edges_y # Show results _, axis = plt.subplots(1, 2) axis[0].imshow(image, cmap='gray') axis[1].imshow(edges, cmap='gray') plt.show() # + # 4 u = np.array([[1], [2], [1]]) v = np.array([[-1, 0, 1]]) S_x = np.dot(u, v) print('u =\n', u) print('v =\n', v) print('S_x =\n', S_x) def convolve_fast(arr, K_u, K_v): ''' Convolve the array using kernel K_u and K_v. arr -- 2D array that gets convolved K_u -- kernel u K_v -- kernel v ''' w = math.floor(K_u.shape[0] / 2) arr_conv = np.zeros(arr.shape) for x in range(w, arr.shape[0] - w): for y in range(w, arr.shape[1] - w): conv_sum = 0 for y_ker in range(-w, w + 1): conv_sum += arr[x, y + y_ker] * K_u[y_ker + w][0] arr_conv[x, y] = conv_sum arr = arr_conv arr_conv = np.zeros(arr.shape) for y in range(w, arr.shape[1] - w): for x in range(w, arr.shape[0] - w): conv_sum = 0 for x_ker in range(-w, w + 1): conv_sum += arr[x + x_ker, y] * K_v[0][x_ker + w] arr_conv[x, y] = conv_sum return arr_conv # TODO: Run both methods and compare their runtime start_naive = time.time() edges_naive = convolve(image, S_y) end_naive = time.time() start_improved = time.time() edges_improved = convolve_fast(image, u, v) end_improved = time.time() print("Time naive: " + str(end_naive - start_naive)) print("Time improved: " + str(end_improved - start_improved)) # Show results f, axis = plt.subplots(1, 2) f.set_figheight(15) f.set_figwidth(15) axis[0].imshow(edges_naive, cmap='gray') axis[1].imshow(edges_improved, cmap='gray') plt.show() # + # 5 from scipy.linalg import svd print('rank of s_x:', np.linalg.matrix_rank(S_x)) print('rank of s_y:', np.linalg.matrix_rank(S_y)) def separate(K): ''' Seperate the 2D kernel into 2 1D kernels. K -- 2D kernel ''' U, s, VT = svd(K) U *= -1 VT *= -1 K_u = np.array([U[:,0] * s[0]]).T K_v = np.array([VT[0]]) return K_u, K_v K_u, K_v = separate(S_x) print('K_u =\n', K_u) print('K_v =\n', K_v) print('K =\n', np.dot(K_u, K_v)) # + # 6 Bonus from scipy import signal def gaussian_kernel(kernel_size, sigma): ''' Return a 2D gaussian kernel. 
kernel_size -- size of the kernel sigma -- sigma of the gaussian blur ''' kernel_1d = signal.gaussian(kernel_size, std=sigma).reshape(kernel_size, 1) h = np.outer(kernel_1d, kernel_1d) return h K = gaussian_kernel(15, 15) K_u, K_v = separate(K) # TODO: Run both methods and compare their runtime start_naive = time.time() conv_x_naive = convolve(image, K) end_naive = time.time() start_improved = time.time() conv_x_improved = convolve_fast(image, K_u, K_v) end_improved = time.time() print("Time naive: " + str(end_naive - start_naive)) print("Time improved: " + str(end_improved - start_improved)) # Plot the results _, axis = plt.subplots(1, 2) axis[0].imshow(conv_x_naive, cmap='gray') axis[1].imshow(conv_x_improved, cmap='gray') plt.show() # - # ## Task 3 # (4 points) # # 1. Implement the naive max function using nested loops. # 2. Implement the $\mathcal{O}(n log w)$ faster max function using a binary tree. # + #1 def naive_max(arr, ws): ''' Return the maximum-filtered array arr -- 2D array ws -- window size ''' g = np.zeros(len(arr)) for x in range(len(arr)): max = 0 for xp in range(x-ws, x+ws+1): if xp < 0 or xp > len(arr) - 1: continue if(arr[xp] > max): max = arr[xp] g[x] = max return g # Generate some random data to filter f = [] for i in range(25): f.append(random.randint(0, 100)) print('input = ', f) print('naive_max =', naive_max(f, 3)) # + #2 class Node: def __init__(self): self.max = -np.inf # value of this node self.top = None # reference to parent node self.left = None # left child node self.right = None # right child node # HINT: this list will hold a reference to all leaf nodes that # are children of this node. # You will need later to add new and replace old values # in the tree. self.elems = [] # list of all child leaf nodes def build_tree(self, depth, top=None): ''' Build up a tree of certain depth depth -- tree depth top -- parent node ''' self.top = top if depth > 0: # TODO: recursively initialize all children self.left = Node() self.left.build_tree(depth-1, self) self.right = Node() self.right.build_tree(depth-1, self) # TODO: concatenate elems lists of children # HINT: nodes that are not leafes should not be added # to self.elems self.elems += self.left.elems self.elems += self.right.elems else: # TODO: this is a leaf node # HINT: this node has to be added to the elems list self.elems.append(self) return self.elems def update(self): ''' Update the value (self.max) of this node and its parent nodes recursively ''' if self.top is None: return if (self.top.left.max > self.top.right.max): self.top.max = self.top.left.max else: self.top.max = self.top.right.max self.top.update() class MaxTree: def __init__(self, ws): ''' ws -- window size ''' # TODO: compute number of leafes and tree depth (length = 2 * ws + 1) self.leafes = 2 * ws + 1 self.depth = math.floor(np.log2(2 * self.leafes)) # TODO: initialize root node self.root = Node() self.root.build_tree(self.depth) # init pointer to next element to be replaced self.ptr = 0 def replace_elem(self, value): ''' Replace an element (value of leaf node) in the tree value -- value of new element ''' # HINT: use self.ptr as a pointer to the next # element that has to be replaced leaf_node = self.root.elems[self.ptr % self.leafes] leaf_node.max = value leaf_node.update() self.ptr += 1 pass def get_max(self): return self.root.max def fast_max(arr, ws): ''' Return the maximum-filtered array arr -- 2D array ws -- window size ''' max_arr = [] # TODO: initialize tree tree = MaxTree(ws) # TODO: compute maximum-filtered output array for x in 
range(len(arr)): tree.replace_elem(arr[x]) max_arr.append(tree.get_max()) return max_arr print('input = ', f) print('fast_max =', fast_max(f, 3)) # - # We now use your implementation to filter an image with different window sizes. # Note, that for small windows, the naive method is indeed faster due to the lower overhead. # However, as it has a much better time complexity, the tree method is much faster for large windows. # + # Nothing to do here ;) def image_max(arr, ws, method): ''' Apply a max-filter to an image arr -- input image ws -- window size method -- filter function ''' out = np.zeros_like(arr) for y in range(arr.shape[0]): out[y] = method(arr[y], ws) for x in range(arr.shape[1]): out[:, x] = method(out[:, x], ws) return out for ws in [2, 4, 8, 16, 32]: print('\nFilter with window size {}'.format(ws)) start_time = time.time() max_img_naive = image_max(image, ws, naive_max) naive_time = time.time() - start_time print('Naive implementation took {:.3f} ms'.format(1000 * naive_time)) start_time = time.time() max_img_improved = image_max(image, ws, fast_max) improved_time = time.time() - start_time print('Improved implementation took {:.3f} ms'.format(1000 * improved_time)) _, axis = plt.subplots(1, 2) axis[0].imshow(max_img_naive, cmap='gray') axis[1].imshow(max_img_improved,cmap='gray') plt.show() # -
2.0-tl-image-filter.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # SCTR # SCTR是用来表达一个股票在一组股票里的相对强度,sctr_rank 为100表明最强的那支股票 https://school.stockcharts.com/doku.php?id=technical_indicators:sctr from tps import common from tps import tradebook as tb import pandas as pd import re g_symbol_df = common.load_symbol_csv('./symbol_ib.txt') ohlc_dct = tb.get_symbol_ohlc(g_symbol_df,fpath='./cache_example/') # + cfg="""{ "algo":[ { "name":"macd", "param":{} }, { "name":"nhnl", "param":{ "pct":"0.99" } } ] }""" algo_dct=tb.load_algo_pipeline(cfg) # - ohlc_dct # + import pandas as pd df_raw = {'symbol':[],'px':[]} for symbol,ohlc in ohlc_dct.items(): if ohlc.empty: print(symbol, "ohlc is empty") continue for an,algo_inx in algo_dct.items(): ind_dct = algo_inx.run_algo(ohlc) #ind_dct['px'] = ohlc['Close'].iloc[-1] if ind_dct: for cn in ind_dct: if cn not in df_raw: df_raw[cn]=[] lst = df_raw[cn] lst.append(ind_dct[cn]) df_raw['symbol'].append(symbol) df_raw['px'].append(ohlc['Close'].iloc[-1]) df= pd.DataFrame.from_dict(df_raw, orient='columns', dtype=None) # - df.sort_values(by=['ma10_sig'], inplace=True, ascending=['True']) df cfg = """{ "criteria_up":[ "ma10_sig>0", "slo10>slo20", "slo20>slo50" ], "criteria_nhnl":[ "ma10_sig>0", "sth_sig<8" ], "criteria_ma":[ "ma10_sig>0", "ma20_sig>0", "ma50_sig>0" ], "criteria_sth":[ "px>0.98*sth" ], "criteria_200":[ "px>0.98*ma200", "px<1.02*ma200" ], "criteria":[ "ma10_sig>0", "lth_sig>0" ], "columns":[ "symbol","px", "macd","ma10%","ma20%","ma50%","ma10","ma20","ma50","ma200","volra20","vol20_sig","ma10_sig","ma20_sig","ma50_sig","slo10","slo20","slo50", "sth","lth","stl","ltl","lth_sig","sth_sig","ltl_sig","stl_sig" ], "sort":[ {"ma10_sig":"True"}, {"ma50_sig":"True"} ] }""" import json rule=json.loads(cfg) # + import re if 'criteria' in rule: criteria = rule['criteria'] else: criteria = [] collst = [] if not criteria: print("criteria is empty,...take the original table") else: # filter by dynamic criteria string crstr = "" pattern1 = "([a-zA-Z][A-Za-z0-9-_]*)" pattern2 = "[></]" for cr in criteria: print("processing cr", cr) if cr[0] == '@': # TODO handle parameter continue collst = re.findall(pattern1, cr) # find all related columns ration = re.findall(pattern2, cr) if len(ration) != 0: cr0 = re.sub(pattern1, r"df['\1']", cr) # put df[] surround pattern / substitute if crstr == "": # first criteria crstr = crstr + "(" + cr0 + ") " else: crstr = crstr + "& (" + cr0 + ") " print("\tto evaluate criteria(logical) = %s" % crstr) if crstr != "": df = df[eval(crstr)] # =========================================================== #print(g_rule) if 'columns' in rule: output_cols = rule['columns'] if not output_cols: output_cols.extend(df.columns.values) #print('===========================================',output_set) for col in collst: if col not in output_cols: # keep origin order output_cols.append(col) df = df[output_cols] if 'sort' in rule: sort_list = rule['sort'] if sort_list: sort_cols=[] asc_list=[] for dct in sort_list: for key,value in dct.items(): sort_cols.append(key) asc_list.append(False if value=="False" else True) df.sort_values(by=sort_cols, inplace=True, ascending=asc_list) pass dfscan = df # - dfscan # + import plotly.graph_objects as go import pandas as pd ohlc=ohlc_dct['AAPL'] fig = go.Figure(data=go.Ohlc(x=ohlc['Date'], open=ohlc['Open'], high=ohlc['High'], low=ohlc['Low'], 
close=ohlc['Close']))
fig.show()
# -

# # SCTR
# SCTR expresses a stock's relative strength within a group of stocks; an sctr_rank of 100 marks the strongest stock in the group.
# https://school.stockcharts.com/doku.php?id=technical_indicators:sctr

# +
import json

cfg="""{
    "algo":[
        { "name":"sctr", "param":{} }
    ]
}"""
rule=json.loads(cfg)
algo_dct=tb.load_algo_pipeline(rule)
# -

df=tb.run_algo(ohlc_dct,algo_dct)

df

# Filter for sctr > sctr 5 days ago > sctr 10 days ago, i.e., the SCTR rank is rising

cfg = """{
    "criteria":[
        "sctr_rank>sctr_r5",
        "sctr_r5>sctr_r10"
    ],
    "columns":[
        "symbol","px",
        "stok","sctr","sctr_rank","sctr_r5","sctr_r10","sctr_r30"
    ],
    "sort":[
        {"sctr_rank":"False"}
    ]
}"""

import json
criteria=json.loads(cfg)
df_scan=tb.run_scan(df,criteria)

df_scan

ohlc

from algo import tps_ind
ohlc = tps_ind.MA(ohlc, [50, 200])

ohlc

ohlc['Close'].values
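# To make the ranking idea above concrete: the real SCTR score comes from the `tps` algo pipeline, but a cross-sectional percentile rank like `sctr_rank` can be sketched with pandas. This is illustrative only; it assumes `df` holds one row per symbol with a numeric `sctr` column, as produced earlier.

# +
# Percentile-rank the raw sctr score across symbols on a 0-100 scale; values near 100 = strongest symbols
sctr_rank_demo = df['sctr'].rank(pct=True) * 100
df.assign(sctr_rank_demo=sctr_rank_demo).sort_values('sctr_rank_demo', ascending=False).head()
# -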
test_refactor.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Convolutional Neural Networks # --- # In this notebook, we train a **CNN** to classify images from the CIFAR-10 database. # # The images in this database are small color images that fall into one of ten classes; some example images are pictured below. # # <img src='notebook_ims/cifar_data.png' width=70% height=70% /> # ### Test for [CUDA](http://pytorch.org/docs/stable/cuda.html) # # Since these are larger (32x32x3) images, it may prove useful to speed up your training time by using a GPU. CUDA is a parallel computing platform and CUDA Tensors are the same as typical Tensors, only they utilize GPU's for computation. # + import torch import numpy as np # check if CUDA is available train_on_gpu = torch.cuda.is_available() if not train_on_gpu: print("CUDA is not available. Training on CPU ...") else: print("CUDA is available! Training on GPU ...") # - # --- # ## Load and Augment the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html) # # Downloading may take a minute. We load in the training and test data, split the training data into a training and validation set, then create DataLoaders for each of these sets of data. # # #### Augmentation # # In this cell, we perform some simple [data augmentation](https://medium.com/nanonets/how-to-use-deep-learning-when-you-have-limited-data-part-2-data-augmentation-c26971dc8ced) by randomly flipping and rotating the given image data. We do this by defining a torchvision `transform`, and you can learn about all the transforms that are used to pre-process and augment data, [here](https://pytorch.org/docs/stable/torchvision/transforms.html). # # #### TODO: Look at the [transformation documentation](https://pytorch.org/docs/stable/torchvision/transforms.html); add more augmentation transforms, and see how your model performs. # # This type of data augmentation should add some positional variety to these images, so that when we train a model on this data, it will be robust in the face of geometric changes (i.e. it will recognize a ship, no matter which direction it is facing). It's recommended that you choose one or two transforms. 
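#
# As a hedged sketch of what a slightly richer pipeline could look like, the cell below adds two further transforms (`RandomCrop` with padding and `ColorJitter`); these particular choices and parameter values are illustrative, not requirements of the exercise.

# +
import torchvision.transforms as transforms

# Illustrative extended augmentation pipeline for 32x32 CIFAR images
extra_transform = transforms.Compose(
    [
        transforms.RandomHorizontalFlip(),                     # mirror left-right
        transforms.RandomRotation(10),                         # small random rotations
        transforms.RandomCrop(32, padding=4),                  # random shifts via padded crops
        transforms.ColorJitter(brightness=0.2, contrast=0.2),  # mild photometric noise
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ]
)
# -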
# + from torchvision import datasets import torchvision.transforms as transforms from torch.utils.data.sampler import SubsetRandomSampler # number of subprocesses to use for data loading num_workers = 0 # how many samples per batch to load batch_size = 20 # percentage of training set to use as validation valid_size = 0.2 # convert data to a normalized torch.FloatTensor transform = transforms.Compose( [ transforms.RandomHorizontalFlip(), # randomly flip and rotate transforms.RandomRotation(10), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), ] ) # choose the training and test datasets train_data = datasets.CIFAR10("data", train=True, download=True, transform=transform) test_data = datasets.CIFAR10("data", train=False, download=True, transform=transform) # obtain training indices that will be used for validation num_train = len(train_data) indices = list(range(num_train)) np.random.shuffle(indices) split = int(np.floor(valid_size * num_train)) train_idx, valid_idx = indices[split:], indices[:split] # define samplers for obtaining training and validation batches train_sampler = SubsetRandomSampler(train_idx) valid_sampler = SubsetRandomSampler(valid_idx) # prepare data loaders (combine dataset and sampler) train_loader = torch.utils.data.DataLoader( train_data, batch_size=batch_size, sampler=train_sampler, num_workers=num_workers ) valid_loader = torch.utils.data.DataLoader( train_data, batch_size=batch_size, sampler=valid_sampler, num_workers=num_workers ) test_loader = torch.utils.data.DataLoader( test_data, batch_size=batch_size, num_workers=num_workers ) # specify the image classes classes = [ "airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck", ] # - # ### Visualize a Batch of Training Data # + import matplotlib.pyplot as plt # %matplotlib inline # helper function to un-normalize and display an image def imshow(img): img = img / 2 + 0.5 # unnormalize plt.imshow(np.transpose(img, (1, 2, 0))) # convert from Tensor image # + # obtain one batch of training images dataiter = iter(train_loader) images, labels = dataiter.next() images = images.numpy() # convert images to numpy for display # plot the images in the batch, along with the corresponding labels fig = plt.figure(figsize=(25, 4)) # display 20 images for idx in np.arange(20): ax = fig.add_subplot(2, 20 / 2, idx + 1, xticks=[], yticks=[]) imshow(images[idx]) ax.set_title(classes[labels[idx]]) # - # ### View an Image in More Detail # # Here, we look at the normalized red, green, and blue (RGB) color channels as three separate, grayscale intensity images. # + rgb_img = np.squeeze(images[3]) channels = ["red channel", "green channel", "blue channel"] fig = plt.figure(figsize=(36, 36)) for idx in np.arange(rgb_img.shape[0]): ax = fig.add_subplot(1, 3, idx + 1) img = rgb_img[idx] ax.imshow(img, cmap="gray") ax.set_title(channels[idx]) width, height = img.shape thresh = img.max() / 2.5 for x in range(width): for y in range(height): val = round(img[x][y], 2) if img[x][y] != 0 else 0 ax.annotate( str(val), xy=(y, x), horizontalalignment="center", verticalalignment="center", size=8, color="white" if img[x][y] < thresh else "black", ) # - # --- # ## Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html) # # This time, you'll define a CNN architecture. 
# Instead of an MLP, which used linear, fully-connected layers, you'll use the following:
# * [Convolutional layers](https://pytorch.org/docs/stable/nn.html#conv2d), which can be thought of as a stack of filtered images.
# * [Maxpooling layers](https://pytorch.org/docs/stable/nn.html#maxpool2d), which reduce the x-y size of an input, keeping only the most _active_ pixels from the previous layer.
# * The usual Linear + Dropout layers to avoid overfitting and produce a 10-dim output.
#
# A network with 2 convolutional layers is shown in the image below and in the code, and you've been given starter code with one convolutional and one maxpooling layer.
#
# <img src='notebook_ims/2_layer_conv.png' height=50% width=50% />
#
# #### TODO: Define a model with multiple convolutional layers, and define the feedforward network behavior.
#
# The more convolutional layers you include, the more complex patterns in color and shape a model can detect. It's suggested that your final model include 2 or 3 convolutional layers as well as linear layers + dropout in between to avoid overfitting.
#
# It's good practice to look at existing research and implementations of related models as a starting point for defining your own models. You may find it useful to look at [this PyTorch classification example](https://github.com/pytorch/tutorials/blob/master/beginner_source/blitz/cifar10_tutorial.py) or [this, more complex Keras example](https://github.com/keras-team/keras/blob/master/examples/cifar10_cnn.py) to help decide on a final structure.
#
# #### Output volume for a convolutional layer
#
# To compute the output size of a given convolutional layer we can perform the following calculation (taken from [Stanford's cs231n course](http://cs231n.github.io/convolutional-networks/#layers)):
# > We can compute the spatial size of the output volume as a function of the input volume size (W), the kernel/filter size (F), the stride with which they are applied (S), and the amount of zero padding used (P) on the border. The output width is given by `(W−F+2P)/S+1`.
#
# For example, for a 7x7 input and a 3x3 filter with stride 1 and pad 0 we would get a 5x5 output. With stride 2 we would get a 3x3 output.
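#
# As a quick sanity check of that formula, here is a small helper added for illustration (`conv_out` is not part of the original exercise code; it just evaluates `(W−F+2P)/S+1` for the examples above and for the 32x32 CIFAR case).

# +
# Spatial output size of a conv/pool layer: (W - F + 2P) / S + 1
def conv_out(W, F, S=1, P=0):
    return (W - F + 2 * P) // S + 1

print(conv_out(7, 3, S=1, P=0))   # 5  -> the 7x7 input / 3x3 filter example above
print(conv_out(7, 3, S=2, P=0))   # 3  -> the same example with stride 2
print(conv_out(32, 3, S=1, P=1))  # 32 -> a 3x3 conv with padding=1 keeps 32x32
print(conv_out(32, 2, S=2, P=0))  # 16 -> a 2x2 max-pool with stride 2 halves it
# -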
# + import torch.nn as nn import torch.nn.functional as F # define the CNN architecture class Net(nn.Module): def __init__(self): super(Net, self).__init__() # convolutional layer (sees 32x32x3 image tensor) self.conv1 = nn.Conv2d(3, 16, 3, padding=1) # convolutional layer (sees 16x16x16 tensor) self.conv2 = nn.Conv2d(16, 32, 3, padding=1) # convolutional layer (sees 8x8x32 tensor) self.conv3 = nn.Conv2d(32, 64, 3, padding=1) # max pooling layer self.pool = nn.MaxPool2d(2, 2) # linear layer (64 * 4 * 4 -> 500) self.fc1 = nn.Linear(64 * 4 * 4, 500) # linear layer (500 -> 10) self.fc2 = nn.Linear(500, 10) # dropout layer (p=0.25) self.dropout = nn.Dropout(0.25) def forward(self, x): # add sequence of convolutional and max pooling layers x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = self.pool(F.relu(self.conv3(x))) # flatten image input x = x.view(-1, 64 * 4 * 4) # add dropout layer x = self.dropout(x) # add 1st hidden layer, with relu activation function x = F.relu(self.fc1(x)) # add dropout layer x = self.dropout(x) # add 2nd hidden layer, with relu activation function x = self.fc2(x) return x # create a complete CNN model = Net() print(model) # move tensors to GPU if CUDA is available if train_on_gpu: model.cuda() # - # ### Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html) # # Decide on a loss and optimization function that is best suited for this classification task. The linked code examples from above, may be a good starting point; [this PyTorch classification example](https://github.com/pytorch/tutorials/blob/master/beginner_source/blitz/cifar10_tutorial.py) or [this, more complex Keras example](https://github.com/keras-team/keras/blob/master/examples/cifar10_cnn.py). Pay close attention to the value for **learning rate** as this value determines how your model converges to a small error. # # #### TODO: Define the loss and optimizer and see how these choices change the loss over time. # + import torch.optim as optim # specify loss function (categorical cross-entropy) criterion = nn.CrossEntropyLoss() # specify optimizer optimizer = optim.SGD(model.parameters(), lr=0.01) # - # --- # ## Train the Network # # Remember to look at how the training and validation loss decreases over time; if the validation loss ever increases it indicates possible overfitting. 
# + # number of epochs to train the model n_epochs = 30 valid_loss_min = np.Inf # track change in validation loss for epoch in range(1, n_epochs + 1): # keep track of training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(train_loader): # move tensors to GPU if CUDA is available if train_on_gpu: data, target = data.cuda(), target.cuda() # clear the gradients of all optimized variables optimizer.zero_grad() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the batch loss loss = criterion(output, target) # backward pass: compute gradient of the loss with respect to model parameters loss.backward() # perform a single optimization step (parameter update) optimizer.step() # update training loss train_loss += loss.item() * data.size(0) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(valid_loader): # move tensors to GPU if CUDA is available if train_on_gpu: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the batch loss loss = criterion(output, target) # update average validation loss valid_loss += loss.item() * data.size(0) # calculate average losses train_loss = train_loss / len(train_loader.sampler) valid_loss = valid_loss / len(valid_loader.sampler) # print training/validation statistics print( "Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}".format( epoch, train_loss, valid_loss ) ) # save model if validation loss has decreased if valid_loss <= valid_loss_min: print( "Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...".format( valid_loss_min, valid_loss ) ) torch.save(model.state_dict(), "model_augmented.pt") valid_loss_min = valid_loss # - # ### Load the Model with the Lowest Validation Loss model.load_state_dict(torch.load("model_augmented.pt")) # --- # ## Test the Trained Network # # Test your trained model on previously unseen data! A "good" result will be a CNN that gets around 70% (or more, try your best!) accuracy on these test images. 
# + # track test loss test_loss = 0.0 class_correct = list(0.0 for i in range(10)) class_total = list(0.0 for i in range(10)) model.eval() # iterate over test data for batch_idx, (data, target) in enumerate(test_loader): # move tensors to GPU if CUDA is available if train_on_gpu: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the batch loss loss = criterion(output, target) # update test loss test_loss += loss.item() * data.size(0) # convert output probabilities to predicted class _, pred = torch.max(output, 1) # compare predictions to true label correct_tensor = pred.eq(target.data.view_as(pred)) correct = ( np.squeeze(correct_tensor.numpy()) if not train_on_gpu else np.squeeze(correct_tensor.cpu().numpy()) ) # calculate test accuracy for each object class for i in range(batch_size): label = target.data[i] class_correct[label] += correct[i].item() class_total[label] += 1 # average test loss test_loss = test_loss / len(test_loader.dataset) print("Test Loss: {:.6f}\n".format(test_loss)) for i in range(10): if class_total[i] > 0: print( "Test Accuracy of %5s: %2d%% (%2d/%2d)" % ( classes[i], 100 * class_correct[i] / class_total[i], np.sum(class_correct[i]), np.sum(class_total[i]), ) ) else: print("Test Accuracy of %5s: N/A (no training examples)" % (classes[i])) print( "\nTest Accuracy (Overall): %2d%% (%2d/%2d)" % ( 100.0 * np.sum(class_correct) / np.sum(class_total), np.sum(class_correct), np.sum(class_total), ) ) # - # ### Visualize Sample Test Results # + # obtain one batch of test images dataiter = iter(test_loader) images, labels = dataiter.next() images.numpy() # move model inputs to cuda, if GPU available if train_on_gpu: images = images.cuda() # get sample outputs output = model(images) # convert output probabilities to predicted class _, preds_tensor = torch.max(output, 1) preds = ( np.squeeze(preds_tensor.numpy()) if not train_on_gpu else np.squeeze(preds_tensor.cpu().numpy()) ) # plot the images in the batch, along with predicted and true labels fig = plt.figure(figsize=(25, 4)) for idx in np.arange(20): ax = fig.add_subplot(2, 20 / 2, idx + 1, xticks=[], yticks=[]) imshow(images[idx] if not train_on_gpu else images[idx].cpu()) ax.set_title( "{} ({})".format(classes[preds[idx]], classes[labels[idx]]), color=("green" if preds[idx] == labels[idx].item() else "red"), ) # -
deep_learning_v2_pytorch/convolutional-neural-networks/cifar-cnn/cifar10_cnn_augmentation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + [markdown] deletable=true editable=true # # Structural Equation Models of Collective Identity # ## Date: 27/01/2017 # # ## Relevant theories to model: # 1. Origins of Collective Identity: # 1. Similarity (proximity & homophily) # 2. Working for a common Goal # # 2. Effects of CI on Social Influence: # 1. Desire for approval (normative factors) # 2. Trust and Credibility (cognitive factors) # 3. Dissonance Reduction (cognitive factors) # 4. Out-group Differentiation (affective factors) # # 3. Effects of CI on Individual Behavior: # 1. Subordination of self to group interest # 2. Positive emotional arousal (in-group attraction) # 3. Negative emotional arousal (out-group hatred, anger) # # **These theories, in general appear as latent variables (multiple ways to describe; measurements unknown). The structural equation model we consider takes the following form:** # + [markdown] deletable=true editable=true # ![title](img/structEquationModel.png) # + [markdown] deletable=true editable=true # **Transfer Criterion:** Applicability to "other" problems involves the establishment (i.e., factor analysis) of relations between experimental measurements and the latent factors in the pathway to/from collective identity. # + [markdown] deletable=true editable=true # **References/Data for Latent Interactions:** # # 1. L1: Similarity - Proximity Relations -> Collective Identity # 1. Theory: # * [Prior Bonds; Solidaristic Behavior - > likely to contribute](https://deepblue.lib.umich.edu/bitstream/handle/2027.42/50928/153.pdf?sequence=1) # # * [Prior Ties, Social Norms, pre-existing solidarities, etc.](http://) # * [Levels of Collective Identity in terms of Similarity](http://psycnet.apa.org/journals/psp/71/1/83.pdf&uid=1996-01782-006) # # 2. Data Sources: # # 2. L2: Similarity - Homophily Relations -> Collective Identity # 1. Theory: # # * [Online Hyperlinks; Online Collective Identity](http://www.sciencedirect.com/science/article/pii/S0378873311000153) # # * [Critical Mass for Collective Action on Networks](http://journals.sagepub.com/doi/abs/10.1177/1043463112473734) # # 2. Data Sources / Experimental Studies: # # * [Homophily in the Adoption of Health Behavior](http://science.sciencemag.org/content/334/6060/1269/tab-figures-data) # # 3. L3: Working for a Common Goal -> Collective Identity # 1. Theory: # # * [Shared Interests Need Selective Rewards; Olson, 1965](https://books.google.com/books?hl=en&lr=&id=jv8wTarzmsQC&oi=fnd&pg=PR9&dq=The+Logic+of+Collective+Action&ots=m9oKrXSqX-&sig=ol4CCa8EBGIeucwvZRxY5jkD9mI#v=onepage&q=The%20Logic%20of%20Collective%20Action&f=false) # # * [Rational Perspective: Benefits for those whos share collective identity [Friedman & McAdam]](https://books.google.com/books?hl=en&lr=&id=2kxcGwv2_u4C&oi=fnd&pg=PA156&ots=xXsQCqpaYJ&sig=Inj5DsD-Gi4vGtXirTmgHW0LGrs#v=onepage&q&f=false) # # * [Using Discourse to Establish an Intergroup Collective Identity](http://www.jstor.org.ezproxy.lib.vt.edu/stable/pdf/20159095.pdf) # # * [Emergent Rationality](http://journals.sagepub.com/doi/pdf/10.1177/104346397009004003) # # 2. Data Sources: # # 4. L4: Collective Identity -> Desire for Approval # 1. Theory: # * [Social Approval Alone is not enough to reduce "free-riding"](http://www.sciencedirect.com/science/article/pii/S0167268199000451) # 2. Data Sources: # # 5. 
L5: Collective Identity -> Dissonance Reduction # 1. Theory: # * [Ingroup Strength from Dissonance Reduction](http://journals.sagepub.com/doi/abs/10.1177/0146167208329216) # * [social support on dissonance](http://psycnet.apa.org/journals/gdn/7/3/214.pdf&uid=2003-07815-003) # * [Individual Identity Affected when Group Identity Jeopardised](http://journals.sagepub.com/doi/abs/10.1177/0959354302121005) # 2. Data Sources: # * [Ingroup Strength from Dissonance Reduction](http://journals.sagepub.com/doi/abs/10.1177/0146167208329216) # # 6. L6: Collective Identity -> Trust and Credibility # 1. Theory: # * [Cooperation & Trust in Prisonner's Dilemma Game](https://papers.ssrn.com/sol3/papers2.cfm?abstract_id=956080) # * [Reciprocity Builds Trust and Social Identity](http://onlinelibrary.wiley.com/doi/10.1002/ejsp.256/full) # * [Group processes across Cultures; trust based on categorical v. interpersonal membership](http://journals.sagepub.com/doi/abs/10.1177/0146167204271305) # * [Trust Emerges Locally & Then Spreads to Outsiders Through Neighbor Interactions (model)](http://www.jstor.org/stable/2657332?seq=1#page_scan_tab_contents) # # 2. Data Sources: # # 7. L7: Collective Identity -> Out-group Differentiation # 1. Theory: # * [Individuals in a group anonymous to outsiders predisposes them to act in terms of social identity; but limits the power of the out-group](http://onlinelibrary.wiley.com/doi/10.1111/j.2044-8309.1994.tb01015.x/full) # * [Out-group cues in determining social identity](http://psycnet.apa.org/journals/psp/47/2/342.pdf&uid=1985-01243-001) # * [Resource Allocation](http://psycnet.apa.org/journals/psp/46/5/1044.pdf&uid=1984-25827-001) # 2. Data Sources: # * [In-group/Out-group differentiation in work and giving.](http://www.sciencedirect.com/science/article/pii/S0167268109001310) # # 8. L8: Collective Identity -> Subordination of Self to Group Interest # 1. Theory: # * [Different Definition of Identity to Include Social](http://journals.sagepub.com/doi/abs/10.1177/0146167291175001) # * [Groups Reward Individual Sacrifice](http://journals.sagepub.com/doi/abs/10.1177/000312240907400102) # * [Motives for Individual Participation in Intergroup Conflict](http://journals.sagepub.com/doi/abs/10.1111/j.1467-9280.2008.02100.x) # 2. Data Sources: # * [Viral Epidemic Social Distancing](http://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1000793) # # 9. L9: Collective Identity -> Positive Emotional Arousal # 1. Theory: # * [people identify more strongly with collective when collective identity was made more salient than when personal identity is salient](http://dlc.dlib.indiana.edu/dlc/bitstream/handle/10535/3000/Back_to_Journal_Cover_Page.pdf?sequence=1&isAllowed=y) # # 2. Data Sources: # # 10. L10: Collective Identity -> Negative Emotional Arousal # 1. Theory: # * [Individual Identity Threatened Effects In-Group / Out-Group Extemity](http://journals.sagepub.com/doi/abs/10.1177/0146167293194003) # # 2. Data Sources: # + import numpy import scipy from matplotlib import pyplot as plt # %matplotlib inline # + [markdown] deletable=true editable=true # ## Public Goods Game # [Group size and the voluntary provision of public goods](http://www.dklevine.com/archive/refs411.pdf) # # In the public goods game , we assume there are N individuals involved in the game. Each individual i has a personal (individual) account where $Z_i$ tokens was endowed. There is a group account to collect tokens from each individual. 
# For a given round, let $m_i$ denotes individual i's allocation of tokens to the group account and $\sum m_j$ representas the sum of tokens placed in the group account by all other individuals. Each individual earned $e_i = \frac{G(m_i + \sum m_j)}{N}$ tokens from the group account. # # Each token placed in the personal account earned $p_i$ tokens with certainty. A representative individual's utility function in any one period can be written as $U_i[p_i(Z_i-m_i) + e_i]$. # # The Marginal Per Capita Return(MPCR) from the group is defined as the ratio of benefits to costs for moving a single token from the individual to the group account. In this experiment, $p_i$ and the function $G()$ were chosen so that the Pareto optimum was for each individual to place all tokens in the group account (ie.e to set $m_i = Z_i$). # + [markdown] deletable=true editable=true # ## Brian's edits # **Yue, in this paper, they do not look at sociodemographic variables; their particpants were microeconomics students at Arizona and Indiana universities... we can't include the following variables here. See table 1 for IVars + that they did multiple rounds.** # # IVars = { # "Procedure": ['MS-XC', 'SS-$'], # "GroupSize": [4,10,40,100], # "MPCR": [.03, .30, .75], # "Round": range(10), # } # # DVars = {"percentContribution": range(100)} # # **Assume a function like:** # # $$perc\_contribution = f(GroupSize,MPCR,Round) + \mathcal{N}(\sigma,\nu)$$ # # I would solve this as follows: # # 1. Look at the charts in figures 1-5. These are all organized by the independent variables listed in Table 1. Ignore the procedure IV or only look at the XC procedures. # # 2. Generate data from these charts by creating data samples by sampling from a normal distribution with a mean at each independent variable and a variance according the the 90% confidence bands. # # 3. Using a linear, some other regression, or a combination of techniques; find a best-fit for f - be able to interpolate and extrapolate between and outside the domain of evaluation for the IVs. # # 4. Calculate the error for each attempt you make. # # 5. The model will not be perfect; just try to fit the data points as best as possible. # + [markdown] deletable=true editable=true # ## Updates # # Given a set of data samples (X, y) where X is a matrix $N*P$, y is a response vection $N*1$. Each row in matrix X is one data example. Its corresponded response is the value in y. It estimates the output y by a linear mapping of X: $p = XB$ where b is a $P*1$ vector that to be optimized. # The objective function for Lasso is described as follows in the so-called Lagrangian form: # > $\frac{1}{2N}||y - Xw||^2_2 + \alpha ||w||$ # # Data samples given in this paper can be organized as follows: # # | Groupsize | MPCR | round | GroupContribution($\hat{y}$)| confidence interval $\sigma$ | # |:---:|:---:|:---:|:---:|:---:| # | 4 | 0.3 | 1 | 36| - | # | 4 | 0.3 | 2 | 34| - | # | 4 | 0.3 | 3 | 28| - | # | ... | ... | ... | ...| ... | # # # In this case, p=3 (we have three features for x), $\alpha=0.5$. In total there are 80 data samples in this paper with varying groupsize, MPCR, and round. The algorithm used to fit the model is coordinate descent. # After fitting the dataset, we get an estimate function as follows: # $$\hat{y} = w_1 * x_1 + w_2 * x_2 + w_3 * x_3 + b$$ # where b is the intercept and $w_1, w_2, w_3$ are coefficients. 
# # Using the [Lasso package from Scikit learn](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Lasso.html), the estimated coefficients and intercept are: # # > * $w_1 = 0.025$ # > * $w_2 = 9.1239$ # > * $w_3 = -1.632$ # > * $b = 41.81$ # # ** Next steps:** Sample the group distribution with ($y$) subject to the $\mathcal{N}(\hat{y},\sigma)$. Run over multiple trials to produce # + [markdown] deletable=true editable=true # ### Method # # We sample new group distributions with y as mean and sigma as the standard deviation (see function resample). # After sampling group distribution $K=10000$ times, we fit Lasso models to each set of the sampled data. # The following plots show the distribution of $w_1, w_2, w_3, b$ from the Lasso models on the new sampled group distributions. # # # ### Input Variables # - data from paper [Group size and the voluntary provision of public goods] # - each example (line) has groupsize, MPCR, rndcnt, groupContribution, Experiments, diff # # ### Output # # A dat file for each sample contains: # - groupsize # - MPCR # - round # - groupContribution # + deletable=true editable=true n, bins, patches = plt.hist(bs, 50, normed=1, facecolor='green', alpha=0.75) plt.xlabel('b') plt.ylabel('Probability') plt.title(r'$\mathrm{Histogram\ of\ b}$') plt.grid(True) plt.show() # + deletable=true editable=true predefined_colors = ['red', 'blue', 'yellow'] for j in xrange(len(ws[0])): wj = [ws[i][j] for i in range(len(ws))] n, bins, patches = plt.hist(wj, 50, normed=1, facecolor=predefined_colors[j], alpha=0.75) plt.xlabel('w%d' % (j+1)) plt.ylabel('Probability') plt.grid(True) plt.show() fig = plt.figure() fig.savefig("w%d_hist.png" % (j+1), dpi=fig.dpi) # + deletable=true editable=true import sys import os import json import numpy as np from numpy.random import normal from sklearn import linear_model import matplotlib.mlab as mlab import matplotlib.pyplot as plt # %matplotlib inline def resample(observed_Y, Z): # Draw random samples from a normal (Gaussian) distribution. 
newY = [] for idy, y in enumerate(observed_Y): mu = y sigma = Z[idy] ny = normal(loc=mu, scale=sigma) newY.append(ny) return newY # Initialize a Lasso model with alpha=0.5 clf = linear_model.Lasso(alpha=0.5) print clf datafile= '../data/data_paper_groupsize.dat' X = [] # array of features Y = [] # array of observed group contribution Z = [] # array of standard deviation K = 10000 # iteration of sampling # Read data from file # Data source: http://www.dklevine.com/archive/refs411.pdf with open(datafile, 'rb') as reader: reader.readline() for l in reader.readlines(): groupsize, MPCR, rndcnt, groupContribution, Experiments, diff = l.strip().split('\t') X.append([float(groupsize), float(MPCR), float(rndcnt)]) Y.append(float(groupContribution)) if float(diff) == 0.0: stdev = 1.0 else: # Obtaining standard deviations from standard errors and confidence intervals for group means # http://handbook.cochrane.org/chapter_7/7_7_3_2_obtaining_standard_deviations_from_standard_errors_and.htm stdev = np.sqrt(float(Experiments)) * float(diff) / 3.29 Z.append(stdev) ws = [] bs = [] for i in xrange(K): newY = resample(Y, Z) clf.fit(np.array(X), np.array(newY)) ws.append(clf.coef_) bs.append(clf.intercept_) # Save synthetic data into files with open('../data/public_goods/synthetic_data_sample_%d.dat' % i, 'wb') as reader: reader.write('groupsize\tMPCR\tround\tgroupContribution\n') for didx, x in enumerate(X): oneline = '\t'.join([str(xi) for xi in x]) reader.write(oneline + '\t' + str(newY[didx]) + '\n') # + [markdown] deletable=true editable=true # ## Ultimatum Game # [Explaining fairness in complex environments](http://www.kevinzollman.com/uploads/5/0/3/6/50361245/zollman_-_explaining_fairness_in_a_complex_environment.pdf) (<NAME> 2008) # # # In the ultimatum game, one player (or group) choose a number between zero and the total good, while the other player (or group) must choose which demands to accept or reject. This ultimatum game has several Nash equilibria. # + [markdown] deletable=true editable=true # Suppose we restrict the game to three demands (1/3, 1/2, 2/3) and three ranges of acceptability ([1/3, 1], [1/2, 1], [2/3, 1]), # this ultimatum game is transformed into a simultaneous-move game. Assuming player A proposes and player B decides to accept or reject. The states based on the demands of A and acceptance state of B are presented in the following table where (X, Y) represents the money received for player B and player A. # # # | Acceptability Range of B | A Demands 1/3 | A Demands 1/2 | A Demands 2/3 | # |:---:|:---:|:---:|:---:| # | [1/3, 1] | (2/3, 1/3) | (1/2, 1/2) | (1/3, 2/3) | # | [1/2, 1] | (2/3, 1/3) | (1/2, 1/2) | (0, 0)| # | [2/3, 1] | (2/3, 1/3) | (0, 0) | (0, 0) | # + [markdown] deletable=true editable=true # In this case, the [1/3, 1] weakly dominates all other strategies since it does equally well as the others against some opponents' strategies, but does better than both against Demand 2/3. # # + [markdown] deletable=true editable=true # The ordered pair $<a, b>$ denotes a strategy where $a$ is the proposal and $b$ is the minimum acceptable. # Assuming the population is N = 10, at each round, we sample $50\%$ of the population to be proposer and $50\%$ of them to be acceptor. # The strategy is uniformly distributed among the players. # # # For instance, $33\%$ of the proposers will demand $1/3$, $33\%$ demand $1/2$, and $33\%$ demand $2/3$. Similarly, $33\%$ of the acceptors will have minimum acceptability of $1/3$, $33\%$ accept $1/2$, $33\%$ accept $2/3$. 
# # # # + [markdown] deletable=true editable=true # ### Modeling Strategy Evolution # # After each round, we will have the distribution of goods in this population. According to the distribution of goods for different strategies, we assign strategies to next population. # Assuming the three strategies follow a multinomial distribution. It is a generalization of the binomial distribution and it models the probability of counts for rolling a $k$-sided die $n$ times. Here $k$ is the number of strategies. $n$ is the number of players (proposer or acceptor). # # In the beginning, we draw a sample of strategies based on the number of players from a multinomial distribution with equal probabilities: # # $$S(N) \sim \mathcal{M}(1/3, 1/3, 1/3)$$ # # In the result of this sample, it gave us the number of players assigned of each strategy. For instance, if $N=20$, $S(N) = [10, 4, 6]$ means 10 players out of 20 were assigned strategy 1, 4 of them assigned for strategy 2, and 6 of them assigned strategy 3. # # # After one round, according to the rewarded goods for each strategy, we sample a new set of strategies for the players from the multinomial distribution: # # $$S(N) \sim \mathcal{M}(a, b, c)$$ # # where a, b, and c are the normalized amounts of rewarded goods for strategy 1, 2, and 3. # # # ### Input Variables # - G = 3.0 # goods for each pair of players in one ultimatum game # - N = 100 # number of population # - R = 10 # number of rounds # - demands_strategy = [1./3., 1./2., 2./3.] # - min_accept_strategy = [1./3., 1./2., 2./3.] # # ### Output # # A json file contains: # - round id (for each round, we have the following fields) # - proposers_ids: array of all proposer user ids # - acceptors_ids: array of all acceptor user ids # - propose_strategy: (key, value) pairs where key is the proposer id and value is the demanding money. # - accept_strategy: (key, value) pairs where key is the acceptor id and value is the min acceptable money. # # ### Observations # # With 100 players (50 proposers and 50 acceptors), after 10 rounds of evolution, we observe that demanding $1/2$ gains more goods for a proposer and thus it is getting more widely adopted; minimum acceptability of $1/3$ gains more goods for a acceptor. # + deletable=true editable=true import numpy as np import random import json import collections from copy import deepcopy from collections import OrderedDict demands_strategy = [1./3., 1./2., 2./3.] min_accept_strategy = [1./3., 1./2., 2./3.] 
G = 3.0 # goods for each pair of players in one ultimatum game N = 100 # number of population R = 10 # number of rounds def assign_strategy(group, stratege_distr): a = deepcopy(group) # print stratege_distr assert len(a) == sum(stratege_distr) strategy = {} assigned_count = 0 for stgy_id, stgy_count in enumerate(stratege_distr): count = 0 current_ = random.sample(a, stgy_count) for val in current_: strategy[val] = stgy_id a = list(set(a) - set(current_)) return strategy demands_strategy_distribution = {} demands_strategy_distribution[0] = np.random.multinomial(N/2, [1/3.]*3) accept_strategy_distribution = {} accept_strategy_distribution[0] = np.random.multinomial(N/2, [1/3.]*3) outjson = {} for r in xrange(R): hist_goods = {} outjson['round_%d' % r] = {} proposers = random.sample(range(0, N - 1), N/2) acceptors = [x for x in xrange(N) if x not in proposers] proposer_strategies = {} proposer_strategy_goods = collections.defaultdict(float) acceptor_strategies = {} acceptor_strategy_goods = collections.defaultdict(float) ## Assigning strategies proposer_strategies = assign_strategy(proposers, demands_strategy_distribution[r]) acceptor_strategies = assign_strategy(acceptors, accept_strategy_distribution[r]) outjson['round_%d' % r]['proposers_ids'] = proposers outjson['round_%d' % r]['acceptors_ids'] = acceptors outjson['round_%d' % r]['propose_strategy'] = {} outjson['round_%d' % r]['accept_strategy'] = {} for pairid, proposer_id in enumerate(proposers): acceptor_id = acceptors[pairid] propose_stgy_id = proposer_strategies[proposer_id] propose_stgy_value = demands_strategy[propose_stgy_id] accept_stgy_id = acceptor_strategies[acceptor_id] accept_stgy_value = min_accept_strategy[accept_stgy_id] outjson['round_%d' % r]['propose_strategy'][proposer_id] = propose_stgy_value outjson['round_%d' % r]['accept_strategy'][acceptor_id] = accept_stgy_value if (1. - propose_stgy_value) >= accept_stgy_value: hist_goods[proposer_id] = propose_stgy_value * G hist_goods[acceptor_id] = (1. - propose_stgy_value) * G proposer_strategy_goods[propose_stgy_id] += propose_stgy_value * G acceptor_strategy_goods[accept_stgy_id] += (1. - propose_stgy_value) * G else: hist_goods[proposer_id] = 0. hist_goods[acceptor_id] = 0. proposer_strategy_goods[propose_stgy_id] += 0. acceptor_strategy_goods[accept_stgy_id] += 0. ## Assign strategy distribution for next round based on their rewards from this round all_goods = sum(proposer_strategy_goods.values()) stgy_dist = [] for key in range(0, len(demands_strategy)): stgy_dist.append(proposer_strategy_goods[key]/all_goods) demands_strategy_distribution[r+1] = np.random.multinomial(N/2, stgy_dist) print demands_strategy_distribution[r+1] all_goods = sum(acceptor_strategy_goods.values()) stgy_dist = [] for key in range(0, len(min_accept_strategy)): stgy_dist.append(acceptor_strategy_goods[key]/all_goods) accept_strategy_distribution[r+1] = np.random.multinomial(N/2, stgy_dist) print accept_strategy_distribution[r+1] n, bins, patches = plt.hist(hist_goods.values(), 10, facecolor='green', alpha=0.75) plt.xlabel('Goods') plt.ylabel('Probability') plt.grid(True) plt.show() with open('../data/ultimatum/synthetic_data/synthetic_data_multi_rouds.json', 'wb') as writer: writer.write(json.dumps(outjson)) # + deletable=true editable=true
notebooks/linear_model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # VARMAX models # # This is a brief introduction notebook to VARMAX models in statsmodels. The VARMAX model is generically specified as: # $$ # y_t = \nu + A_1 y_{t-1} + \dots + A_p y_{t-p} + B x_t + \epsilon_t + # M_1 \epsilon_{t-1} + \dots M_q \epsilon_{t-q} # $$ # # where $y_t$ is a $\text{k_endog} \times 1$ vector. # %matplotlib inline import numpy as np import pandas as pd import statsmodels.api as sm import matplotlib.pyplot as plt dta = sm.datasets.webuse('lutkepohl2', 'https://www.stata-press.com/data/r12/') dta.index = dta.qtr endog = dta.loc['1960-04-01':'1978-10-01', ['dln_inv', 'dln_inc', 'dln_consump']] # ## Model specification # # The `VARMAX` class in statsmodels allows estimation of VAR, VMA, and VARMA models (through the `order` argument), optionally with a constant term (via the `trend` argument). Exogenous regressors may also be included (as usual in statsmodels, by the `exog` argument), and in this way a time trend may be added. Finally, the class allows measurement error (via the `measurement_error` argument) and allows specifying either a diagonal or unstructured innovation covariance matrix (via the `error_cov_type` argument). # ## Example 1: VAR # # Below is a simple VARX(2) model in two endogenous variables and an exogenous series, but no constant term. Notice that we needed to allow for more iterations than the default (which is `maxiter=50`) in order for the likelihood estimation to converge. This is not unusual in VAR models which have to estimate a large number of parameters, often on a relatively small number of time series: this model, for example, estimates 27 parameters off of 75 observations of 3 variables. exog = endog['dln_consump'] mod = sm.tsa.VARMAX(endog[['dln_inv', 'dln_inc']], order=(2,0), trend='nc', exog=exog) res = mod.fit(maxiter=1000, disp=False) print(res.summary()) # From the estimated VAR model, we can plot the impulse response functions of the endogenous variables. ax = res.impulse_responses(10, orthogonalized=True).plot(figsize=(13,3)) ax.set(xlabel='t', title='Responses to a shock to `dln_inv`'); # ## Example 2: VMA # # A vector moving average model can also be formulated. Below we show a VMA(2) on the same data, but where the innovations to the process are uncorrelated. In this example we leave out the exogenous regressor but now include the constant term. mod = sm.tsa.VARMAX(endog[['dln_inv', 'dln_inc']], order=(0,2), error_cov_type='diagonal') res = mod.fit(maxiter=1000, disp=False) print(res.summary()) # ## Caution: VARMA(p,q) specifications # # Although the model allows estimating VARMA(p,q) specifications, these models are not identified without additional restrictions on the representation matrices, which are not built-in. For this reason, it is recommended that the user proceed with error (and indeed a warning is issued when these models are specified). Nonetheless, they may in some circumstances provide useful information. mod = sm.tsa.VARMAX(endog[['dln_inv', 'dln_inc']], order=(1,1)) res = mod.fit(maxiter=1000, disp=False) print(res.summary())
examples/notebooks/statespace_varmax.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Time: O(n * s) + O(n * s) ~ O(n * s) where n -> Length of Alien dictionary & s -> Length of the largest word in dictionary # Space: O(n) + O(n) + O(n) + O(n + s) ~ O(n) from collections import defaultdict def verify_alien_dictionary(words): def dfs(u): seen.add(u) for v in graph[u]: if not in_degree[v] or v in seen: continue in_degree[v] -= 1 if not in_degree[v]: res.append(v) dfs(v) graph = defaultdict(list) in_degree = defaultdict(int) n = len(words) for i in range(n - 1): for j in range(len(min(words[i], words[i + 1]))): if words[i][j] != words[i + 1][j]: graph[words[i][j]].append(words[i + 1][j]) in_degree[words[i + 1][j]] += 1 if not in_degree[words[i][j]]: in_degree[words[i][j]] = 0 break seen = set() res = [] for c in in_degree: if not in_degree[c] and c not in seen: res.append(c) dfs(c) for word in words: for c in word: if c not in res: res.append(c) return res if __name__=='__main__': dictionary = ['yxx', 'yxxta', 'xyzt', 'xyzx', 'zxy', 'zxt'] res = verify_alien_dictionary(dictionary) for r in res: print(r, end = ' -> ') print('END') # -
assignments/graph/Verifying an Alien Dictionary using Topological Algorithm.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # <font color=Teal>ATOMIC and ASTRING FUNCTIONS (Python Code)</font> # ### By <NAME>, PhD, Dr.Eng., Professor, Honorary Professor # - https://www.researchgate.net/profile/Sergei_Eremenko # - https://www.amazon.com/Sergei-Eremenko/e/B082F3MQ4L # - https://www.linkedin.com/in/sergei-eremenko-3862079 # - https://www.facebook.com/SergeiEremenko.Author # Atomic functions (AF) described in many books and hundreds of papers have been discovered in 1970s by Academician NAS of Ukraine Rvachev V.L. (https://ru.wikipedia.org/w/index.php?oldid=83948367) (author's teacher) and professor Rvachev V.A. and advanced by many followers, notably professor Kravchenko V.F. (https://ru.wikipedia.org/w/index.php?oldid=84521570), <NAME> (https://www.researchgate.net/profile/Hrvoje_Gotovac), <NAME> (https://www.researchgate.net/profile/Volodymyr_Kolodyazhny), <NAME> (https://www.researchgate.net/profile/Oleg_Kravchenko) as well as the author <NAME> (https://www.researchgate.net/profile/Sergei_Eremenko) [1-4] for a wide range of applications in mathematical physics, boundary value problems, statistics, radio-electronics, telecommunications, signal processing, and others. # As per historical survey (https://www.researchgate.net/publication/308749839), some elements, analogs, subsets or Fourier transformations of AFs sometimes named differently (Fabius function, hat function, compactly supported smooth function) have been probably known since 1930s and rediscovered many times by scientists from different countries, including Fabius, W.Hilberg and others. However, the most comprehensive 50+ years’ theory development supported by many books, dissertations, hundreds of papers, lecture courses and multiple online resources have been performed by the schools of V.L. Rvachev, V.A. Rvachev and <NAME>. # In 2017-2020, <NAME>, in papers "Atomic Strings and Fabric of Spacetime", "Atomic Solitons as a New Class of Solitons", "Atomic Machine Learning" and book "Soliton Nature" [1-8], has introduced <b>AString</b> atomic function as an integral and 'composing branch' of Atomic Function up(x): <font color=maroon>AString'(x) = AString(2x+1) - AString(2x-1) = up(x)</font> # AString function, is a smooth solitonic kink function by joining of which on a periodic lattice it is possible to compose a straight-line resembling flat spacetime as well as to build 'solitonic atoms' composing different fields. It may lead to novel models of spacetime and quantized gravity where AString may describe Spacetime Quantum, or Spacetime Metriant. Also, representing of different fields via shift and stretches of AStrings and Atomic Functions may lead to unified theory where AString may describe some fundamental building block of quantum fields, like a string, elementary spacetime distortion or metriant. # So, apart from traditional areas of AF applications in mathematical physics, radio-electronics and signal processing, AStrings and Atomic Functions may be expanded to Spacetime Physics, String theory, General and Special Relativity, Theory of Solitons, Lattice Physics, Quantized Gravity, Cosmology, Dark matter and Multiverse theories as well as Finite Element Methods, Nonarchimedean Computers, Atomic regression analysis, Atomic Kernels, Machine Learning and Artificial Intelligence. # # <font color=teal>1. 
Atomic Function up(x) (introduced in 1971 by V.L.Rvachev and V.A.Rvachev)</font> import numpy as np import pylab as pl pl.rcParams["figure.figsize"] = 9,6 # + ################################################################### ##This script calculates the values of Atomic Function up(x) (1971) ################################################################### ################### One Pulse of atomic function def up1(x: float) -> float: #Atomic function table up_y = [0.5, 0.48, 0.460000017,0.440000421,0.420003478,0.400016184, 0.380053256, 0.360139056, 0.340308139, 0.320605107,0.301083436, 0.281802850, 0.262826445, 0.244218000, 0.226041554, 0.208361009, 0.191239338, 0.174736305, 0.158905389, 0.143991189, 0.129427260, 0.115840866, 0.103044024, 0.9110444278e-01, 0.798444445e-01, 0.694444445e-01, 0.598444445e-01, 0.510444877e-01, 0.430440239e-01, 0.358409663e-01, 0.294282603e-01, 0.237911889e-01, 0.189053889e-01, 0.147363055e-01, 0.112393379e-01, 0.836100883e-02, 0.604155412e-02, 0.421800000e-02, 0.282644445e-02, 0.180999032e-02, 0.108343562e-02, 0.605106267e-03, 0.308138660e-03, 0.139055523e-03, 0.532555251e-04, 0.161841328e-04, 0.347816874e-05, 0.420576116e-05, 0.167693347e-07, 0.354008603e-10, 0] up_x = np.arange(0.5, 1.01, 0.01) res = 0. if ((x>=0.5) and (x<=1)): for i in range(len(up_x) - 1): if (up_x[i] >= x) and (x < up_x[i+1]): N1 = 1 - (x - up_x[i])/0.01 res = N1 * up_y[i] + (1 - N1) * up_y[i+1] return res return res ############### Atomic Function Pulse with width, shift and scale ############# def upulse(t: float, a = 1., b = 0., c = 1., d = 0.) -> float: x = (t - b)/a res = 0. if (x >= 0.5) and (x <= 1): res = up1(x) elif (x >= 0.0) and (x < 0.5): res = 1 - up1(1 - x) elif (x >= -1 and x <= -0.5): res = up1(-x) elif (x > -0.5) and (x < 0): res = 1 - up1(1 + x) res = d + res * c return res ############### Atomic Function Applied to list with width, shift and scale ############# def up(x: list, a = 1., b = 0., c = 1., d = 0.) -> list: res = [] for i in range(len(x)): res.append(upulse(x[i], a, b, c, d)) return res # - x = np.arange(-2.0, 2.0, 0.01) pl.title('Atomic Function up(x)') pl.plot(x, up(x), label='Atomic Function') pl.grid(True) pl.show() # # <font color=teal>2. Atomic String Function (AString) is an Integral and Composing Branch of Atomic Function up(x) (introduced in 2017 by <NAME>)</font> # AString function is solitary kink function which simultaneously is integral and composing branch of atomic function up(x) # ### <font color=maroon>AString'(x) = AString(2x+1) - AString(2x-1) = up(x)</font> # + ############### Atomic String ############# def AString1(x: float) -> float: res = 1 * (upulse(x/2.0 - 0.5) - 0.5) return res ############### Atomic String Pulse with width, shift and scale ############# def AStringPulse(t: float, a = 1., b = 0., c = 1., d = 0.) -> float: x = (t - b)/a if (x < -1): res = -0.5 elif (x > 1): res = 0.5 else: res = AString1(x) res = d + res * c return res ###### Atomic String Applied to list with width, shift and scale ############# def AString(x: list, a = 1., b = 0., c = 1., d = 0.) 
-> list: res = [] for i in range(len(x)): res.append(AStringPulse(x[i], a, b, c, d)) #res[i] = AStringPulse(x[i], a, b, c) return res ###### Summation of two lists ############# def Sum(x1: list, x2: list) -> list: res = [] for i in range(len(x1)): res.append(x1[i] + x2[i]) return res # - x = np.arange(-2.0, 2.0, 0.01) pl.title('Atomic String Function') pl.plot(x, AString(x, 1.0, 0, 1, 0), label='Atomic String') pl.grid(True) pl.show() # ## Atomic String, Atomic Function (AF) and AF Derivative plotted together # + x = np.arange(-2.0, 2.0, 0.01) #This Calculates Derivative dx = x[1] - x[0] dydx = np.gradient(up(x), dx) pl.plot(x, up(x), label='Atomic Function') pl.plot(x, AString(x, 1.0, 0, 1, 0), linewidth=2, label='Atomic String Function') pl.plot(x, dydx, '--', label='A-Function Derivative') pl.title('Atomic and AString Functions') pl.legend(loc='best', numpoints=1) pl.grid(True) pl.show() # - # # <font color=teal>3. Properties of Atomic Function Up(x)</font> # ## 3.1. Atomic Function Derivative expressed via Atomic Function itself # Atomic Function Derivative can be exressed via Atomic Function itself - up'(x)= 2up(2x+1)-2up(2x-1) meaning the shape of pulses for derivative function can be represented by shifted and stratched Atomic Function itself - remarkable property # ### <font color=maroon>up'(x)= 2up(2x+1)-2up(2x-1)</font> # ### Atomic Function and its Derivative plotted together # + x = np.arange(-2.0, 2.0, 0.01) pl.plot(x, up(x), label='Atomic Function', linewidth=2) pl.plot(x, dydx, '--', label='Atomic Function Derivative', linewidth=1, color="Green") pl.title('Atomic Function and Its Derivative') pl.legend(loc='best', numpoints=1) pl.grid(True) pl.show() # - # ## 3.2. Partition of Unity # The Atomic Function pulses superposition set at points -2, -1, 0, +1, +2... can exactly represent a Unity (number 1): # 1 = ... up(x-3) + up(x-2) + up(x-1) + up(x-0) + up(x+1) + up(x+2) + up(x+3) + ... # ### <font color=maroon>1 = ... up(x-3) + up(x-2) + up(x-1) + up(x-0) + up(x+1) + up(x+2) + up(x+3) + ...</font> x = np.arange(-2.0, 2.0, 0.01) pl.plot(x, up(x, 1, -1), '--', linewidth=1, label='Atomic Function at x=-1') pl.plot(x, up(x, 1, +0), '--', linewidth=1, label='Atomic Function at x=0') pl.plot(x, up(x, 1, -1), '--', linewidth=1, label='Atomic Function at x=-1') pl.plot(x, Sum(up(x, 1, -1), Sum(up(x), up(x, 1, 1))), linewidth=2, label='Atomic Function Compounding') pl.title('Atomic Function Compounding represent 1') pl.legend(loc='best', numpoints=1) pl.grid(True) pl.show() # ## 3.3. Atomic Function (AF) is a 'finite', 'compactly supported', or 'solitary' function # Like a Spline, Atomic Function (AF) 'compactly supported' not equal to zero only on section |x|<=1 # + x = np.arange(-5.0, 5.0, 0.01) pl.plot(x, up(x), label='Atomic Function', linewidth=2) #pl.plot(x, dydx, '--', label='Atomic Function Derivative', linewidth=1, color="Green") pl.title('Atomic Function is compactly supported') pl.legend(loc='best', numpoints=1) pl.grid(True) pl.show() # - # ## 3.4 Atomic Function is a non-analytical function (can not be represented by Taylor's series), but with known Fourier Transformation allowing to exactly calculate AF in certain points, with tabular representation provided in script above. # # <font color=teal>4. Properties of Atomic String Function</font> # ## 4.1. 
AString is not only Integral but also Composing Branch of Atomic Function # ### <font color=maroon>AString'(x) = AString(2x+1) - AString(2x-1) = up(x)</font> # Astring is a swing-like function - Integral of Atomic Function (AF) which can be expressed via AF itself: # AString(x) = Integral(0,x)(Up(x)) = Up(x/2 - 1/2) - 1/2 # ### <font color=maroon>AString(x) = Integral(0,x)(Up(x)) = Up(x/2 - 1/2) - 1/2</font> # ## 4.2. Atomic Function is a 'solitonic atom' composed from two opposite AStrings # The concept of 'Solitonic Atoms' (bions) composed from opposite kinks is known in soliton theory [3,5]. # ### <font color=maroon>up(x) = AString(2x + 1) - AString(2x - 1)</font> # + ######### Presentation of Atomic Function via Atomic Strings ########## x = np.arange(-2.0, 2.0, 0.01) pl.plot(x, AString(x, 1, 0, 1, 0), '--', linewidth=1, label='AString(x)') pl.plot(x, AString(x, 0.5, -0.5, +1, 0), '--', linewidth=2, label='+AString(2x+1)') pl.plot(x, AString(x, 0.5, +0.5, -1, 0), '--', linewidth=2, label='-AString(2x-1)') #pl.plot(x, up(x, 1.0, 0, 1, 0), '--', linewidth=1, label='Atomic Function') AS2 = Sum(AString(x, 0.5, -0.5, +1, 0), AString(x, 0.5, +0.5, -1, 0)) pl.plot(x, AS2, linewidth=3, label='Up(x) via Strings') pl.title('Atomic Function as a Combination of AStrings') pl.legend(loc='best', numpoints=1) pl.grid(True) pl.show() # - # ## 4.3. AStrings and Atomic Solitons # Solitonic mathematical properties of AString and Atomic Functions have been explored in author's paper [3] (<NAME>. Atomic solitons as a new class of solitons; 2018; https://www.researchgate.net/publication/329465767). They both satisfy differential equations with shifted arguments which introduce special kind of <b>nonlinearity</b> typical for all mathematical solitons. # AString belong to the class of <b>Solitonic Kinks</b> similar to sine-Gordon, Frenkel-Kontorova, tanh and others. Unlike other kinks, AStrings are truly solitary (compactly-supported) and also have a unique property of composing of both straight-line and solitonic atoms on lattice resembling particle-like properties of solitons. # Atomic Function up(x) is not actually a mathematical soliton, but a complex object composed from summation of two opposite AString kinks, and in solitonic terminology, is called 'solitonic atoms' (like bions). # ## 4.4. All derivatives of AString can be represented via AString itself # ### <font color=maroon>AString'(x) = AString(2x + 1) - AString(2x - 1)</font> # It means AString is a smooth (infinitely divisible) function, with fractalic properties. # ## 4.5. AString and Fabius Function # Fabius Function https://en.wikipedia.org/wiki/Fabius_function, with unique property f'(x) = 2f(2x), published in 1966 but was probably known since 1935, is shifted and stretched AString function. Fabius function is not directly an integral of atomic function up(x). # ### <font color=maroon>Fabius(x) = AString(2x - 1) + 0.5</font> x = np.arange(-2, 2.0, 0.01) pl.title('AString and Fabius Functions') pl.plot(x, AString(x, 0.5, 0.5, 1, 0.5), label='Fabius Function') pl.plot(x, AString(x, 1, 0, 1, 0), label='AString Function') pl.legend(loc='best', numpoints=1) pl.grid(True) pl.show() # ## 4.6. Partition of Line from Atomic String functions # Combination/summation of Atomic Strings can exactly represent a straight line: # x = ...Astring(x-2) + Astring(x-1) + AString(x) + Astring(x+1) + Astring(x+2)... 
# ### <font color=maroon>x = ...Astring(x-2) + Astring(x-1) + AString(x) + Astring(x+1) + Astring(x+2)...</font> # ### Partition based on AString function with width 1 and height 1 # + x = np.arange(-3, 3, 0.01) pl.plot(x, AString(x, 1, -1.0, 1, 0), '--', linewidth=1, label='AString 1') pl.plot(x, AString(x, 1, +0.0, 1, 0), '--', linewidth=1, label='AString 2') pl.plot(x, AString(x, 1, +1.0, 1, 0), '--', linewidth=1, label='AString 3') AS2 = Sum(AString(x, 1, -1.0, 1, 0), AString(x, 1, +0.0, 1, 0)) AS3 = Sum(AS2, AString(x, 1, +1.0, 1, 0)) pl.plot(x, AS3, label='AStrings Sum', linewidth=2) pl.title('Atomic Strings compose Line') pl.legend(loc='best', numpoints=1) pl.grid(True) pl.show() # - # ### Partition based on AString with certain width and height depending on a size of 'quanta' # + x = np.arange(-40.0, 40.0, 0.01) width = 10.0 height = 10.0 #pl.plot(x, ABline (x, 1, 0), label='ABLine 1*x') pl.plot(x, AString(x, width, -3*width/2, height, -3*width/2), '--', linewidth=1, label='AString 1') pl.plot(x, AString(x, width, -1*width/2, height, -1*width/2), '--', linewidth=1, label='AString 2') pl.plot(x, AString(x, width, +1*width/2, height, +1*width/2), '--', linewidth=1, label='AString 3') pl.plot(x, AString(x, width, +3*width/2, height, +3*width/2), '--', linewidth=1, label='AString 4') AS2 = Sum(AString(x, width, -3*width/2, height, -3*width/2), AString(x, width, -1*width/2, height, -1*width/2)) AS3 = Sum(AS2, AString(x, width,+1*width/2, height, +1*width/2)) AS4 = Sum(AS3, AString(x, width,+3*width/2, height, +3*width/2)) pl.plot(x, AS4, label='AStrings Joins', linewidth=2) pl.title('Atomic Strings Combinations') pl.legend(loc='best', numpoints=1) pl.grid(True) pl.show() # - # # 5. Representing curved shapes via AStrings and Atomic Functions # Shifts and stretches of Atomic adn AString functions allows reproducing curved surfaces (eq curved spacetime). Details are in author's papers "Atomic Strings and Fabric of Spacetime", "Atomic Solitons as a New Class of Solitons". # + x = np.arange(-50.0, 50.0, 0.1) dx = x[1] - x[0] CS6 = Sum(up(x, 5, -30, 5, 5), up(x, 15, 0, 15, 5)) CS6 = Sum(CS6, up(x, 10, +30, 10, 5)) pl.plot(x, CS6, label='Spacetime Density distribution') IntC6 = np.cumsum(CS6)*dx/50 pl.plot(x, IntC6, label='Spacetime Shape (Geodesics)') DerC6 = np.gradient(CS6, dx) pl.plot(x, DerC6, label='Spacetime Curvature') LightTrajectory = -10 -IntC6/5 pl.plot(x, LightTrajectory, label='Light Trajectory') pl.title('Shape of Curved Spacetime model') pl.legend(loc='best', numpoints=1) pl.grid(True) pl.show() # - # # <font color=teal>6. 'Soliton Nature' book</font> # ## 6.1. AStrings and Atomic functions are also described in the book 'Soliton Nature' # Soliton Nature book is easy-to-read, pictorial, interactive book which uses beautiful photography, video channel, and computer scripts in R and Python to demonstrate existing and explore new solitons – the magnificent and versatile energy concentration phenomenon of nature. New class of atomic solitons can be used to describe Higgs boson (‘the god particle’) fields, spacetime quanta and other fundamental building blocks of nature. #pl.rcParams["figure.figsize"] = 16,12 book = pl.imread('BookSpread_small.png') pl.imshow(book) # ## 6.2. 
'Soliton Nature' Video Channel, Book Trailer and Web Site # Video channel https://www.youtube.com/channel/UCexT5iyczZH2HY1-jSafFeQ features amazing solitonic phenomena in nature - welcome to subscribe # Book web site www.solitonnature.com contains book chapters and amazing video-gallery # Book Trailers: https://www.youtube.com/watch?v=cZMZdW_3J84, https://www.youtube.com/watch?v=2lABLpIcevo, https://www.youtube.com/watch?v=hQ3zGFEnSWI # ## 6.3. 'Soliton Nature' book in major bookstores around the globe # - Amazon US https://www.amazon.com/gp/product/1951630777, # - Amazon UK https://www.amazon.co.uk/Sergei-Eremenko/e/B082F3MQ4L, # - Amazon Germany https://www.amazon.de/Sergei-Eremenko/e/B082F3MQ4L, # - Amazon France https://www.amazon.fr/Soliton-Nature-Discover-Beautiful-Channel/dp/1951630777, # - Google Books https://books.google.com.au/books/about/Soliton_Nature.html?id=d2zNDwAAQBAJ, # - Kindle eBooks of your country, like https://www.amazon.com/Soliton-Nature-Discover-Beautiful-Channel-ebook/dp/B082B5PP6R. # - Book web site www.solitonnature.com # # <font color=teal>7. Online Source Code Repositories</font> # This code is available on GitHub: https://solitonscientific.github.io/AtomicString/AFAString.html # See also # # - https://github.com/SolitonScientific # # - https://solitonscientific.github.io/AtomicSoliton/AtomicSoliton.html # - https://solitonscientific.github.io/AtomicString/AtomicString1.html # - https://solitonscientific.github.io/AtomicMachineLearning/AtomicMachineLearning.html # # - https://notebooks.azure.com/Soliton/projects/AtomicString1 # - https://notebooks.azure.com/Soliton/projects/solitonnature # - https://notebooks.azure.com/Soliton/projects/geosolitons # # References # 1. <NAME>. Atomic Strings and Fabric of Spacetime. Journal Achievements of Modern Radioelectronics, 2018. No.6. https://www.researchgate.net/publication/329455498 # 2. <NAME>. Atomic solitons as a new class of solitons. Journal Nonlinear World, No.6, Vol.16, 2018, p.39-63. DOI: 10.18127/j20700970-201806-06. https://www.researchgate.net/publication/329455498 # 3. <NAME>. Atomic solitons as a new class of solitons (English, with Russian Abstract). Journal Nonlinear World, No.6, Vol.16, 2018, p.39-63. DOI: 10.18127/j20700970-201806-06. https://www.researchgate.net/publication/329465767 # 4. <NAME>. Soliton Nature: Discover Beautiful Nature with 200 Images and Video Channel. ISBN: 978-1-951630-77-5. https://www.amazon.com/gp/product/1951630777; https://www.researchgate.net/publication/321228263; # 5. Eremenko, S.Yu. Atomic Machine Learning. Journal Neurocomputers. 2018, No.3. https://www.researchgate.net/publication/322520539_Atomic_Machine_Learning # 6. ResearchGate project: https://www.researchgate.net/project/Atomic-Strings-Quantum-of-Spacetime-and-Gravitation # 7. ResearchGate project: https://www.researchgate.net/project/Atomic-String-and-Atomic-Function-New-Soliton-Candidates # 8. ResearchGate project: https://www.researchgate.net/project/Atomic-Strings-Quantum-of-Spacetime-and-Gravitation # <div align=right><i>By <font color=Teal><b><NAME></b></font>, PhD, Dr.Eng., Professor, Honorary Professor <br> # https://www.researchgate.net/profile/Sergei_Eremenko <br> # https://www.amazon.com/Sergei-Eremenko/e/B082F3MQ4L <br> # https://www.linkedin.com/in/sergei-eremenko-3862079 <br> # https://www.facebook.com/SergeiEremenko.Author/ # </i></div>
AFAString.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os, glob import numpy as np, pandas as pd, seaborn as sns, matplotlib.pyplot as plt def get_results(clf): n = 5 results = { 'accuracy': [], 'f1': [], 'memory_fit': [], 'time_fit': [], 'memory_predict': [], 'time_predict': [] } for file in glob.glob(os.path.join('experiments', clf, 'final test *')): file, ext = os.path.splitext(file) if ext in ['.csv', '.npy']: continue with open(file, 'r') as f: for line in f: key, value = line.strip('\n').split(': ') if key in results.keys(): results[key].append(float(value)) results = {k:np.array(v) for k, v in results.items()} means = {k:v.mean() for k, v in results.items()} errs = {k:v.std()/np.sqrt(n) for k, v in results.items()} return means, errs get_results('lstm')
notebooks/Test Results.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_pytorch_p36 # language: python # name: conda_pytorch_p36 # --- # # Sentiment Analysis # # ## Updating a Model in SageMaker # # _Deep Learning Nanodegree Program | Deployment_ # # --- # # In this notebook we will consider a situation in which a model that we constructed is no longer working as we intended. In particular, we will look at the XGBoost sentiment analysis model that we constructed earlier. In this case, however, we have some new data that our model doesn't seem to perform very well on. As a result, we will re-train our model and update an existing endpoint so that it uses our new model. # # This notebook starts by re-creating the XGBoost sentiment analysis model that was created in earlier notebooks. This means that you will have already seen the cells up to the end of Step 4. The new content in this notebook begins at Step 5. # # ## Instructions # # Some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this notebook. You will not need to modify the included code beyond what is requested. Sections that begin with '**TODO**' in the header indicate that you need to complete or implement some portion within them. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a `# TODO: ...` comment. Please be sure to read the instructions carefully! # # In addition to implementing code, there will be questions for you to answer which relate to the task and your implementation. Each section where you will answer a question is preceded by a '**Question:**' header. Carefully read each question and provide your answer below the '**Answer:**' header by editing the Markdown cell. # # > **Note**: Code and Markdown cells can be executed using the **Shift+Enter** keyboard shortcut. In addition, a cell can be edited by typically clicking it (double-click for Markdown cells) or by pressing **Enter** while it is highlighted. # ## Step 1: Downloading the data # # The dataset we are going to use is very popular among researchers in Natural Language Processing, usually referred to as the [IMDb dataset](http://ai.stanford.edu/~amaas/data/sentiment/). It consists of movie reviews from the website [imdb.com](http://www.imdb.com/), each labeled as either '**pos**itive', if the reviewer enjoyed the film, or '**neg**ative' otherwise. # # > Maas, <NAME>., et al. [Learning Word Vectors for Sentiment Analysis](http://ai.stanford.edu/~amaas/data/sentiment/). In _Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies_. Association for Computational Linguistics, 2011. # # We begin by using some Jupyter Notebook magic to download and extract the dataset. # %mkdir ../data # !wget -O ../data/aclImdb_v1.tar.gz http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz # !tar -zxf ../data/aclImdb_v1.tar.gz -C ../data # ## Step 2: Preparing the data # # The data we have downloaded is split into various files, each of which contains a single review. It will be much easier going forward if we combine these individual files into two large files, one for training and one for testing. 
# + import os import glob def read_imdb_data(data_dir='../data/aclImdb'): data = {} labels = {} for data_type in ['train', 'test']: data[data_type] = {} labels[data_type] = {} for sentiment in ['pos', 'neg']: data[data_type][sentiment] = [] labels[data_type][sentiment] = [] path = os.path.join(data_dir, data_type, sentiment, '*.txt') files = glob.glob(path) for f in files: with open(f) as review: data[data_type][sentiment].append(review.read()) # Here we represent a positive review by '1' and a negative review by '0' labels[data_type][sentiment].append(1 if sentiment == 'pos' else 0) assert len(data[data_type][sentiment]) == len(labels[data_type][sentiment]), \ "{}/{} data size does not match labels size".format(data_type, sentiment) return data, labels # - data, labels = read_imdb_data() print("IMDB reviews: train = {} pos / {} neg, test = {} pos / {} neg".format( len(data['train']['pos']), len(data['train']['neg']), len(data['test']['pos']), len(data['test']['neg']))) # + from sklearn.utils import shuffle def prepare_imdb_data(data, labels): """Prepare training and test sets from IMDb movie reviews.""" #Combine positive and negative reviews and labels data_train = data['train']['pos'] + data['train']['neg'] data_test = data['test']['pos'] + data['test']['neg'] labels_train = labels['train']['pos'] + labels['train']['neg'] labels_test = labels['test']['pos'] + labels['test']['neg'] #Shuffle reviews and corresponding labels within training and test sets data_train, labels_train = shuffle(data_train, labels_train) data_test, labels_test = shuffle(data_test, labels_test) # Return a unified training data, test data, training labels, test labets return data_train, data_test, labels_train, labels_test # - train_X, test_X, train_y, test_y = prepare_imdb_data(data, labels) print("IMDb reviews (combined): train = {}, test = {}".format(len(train_X), len(test_X))) train_X[100] # ## Step 3: Processing the data # # Now that we have our training and testing datasets merged and ready to use, we need to start processing the raw data into something that will be useable by our machine learning algorithm. To begin with, we remove any html formatting that may appear in the reviews and perform some standard natural language processing in order to homogenize the data. 
import nltk nltk.download("stopwords") from nltk.corpus import stopwords from nltk.stem.porter import * stemmer = PorterStemmer() # + import re from bs4 import BeautifulSoup def review_to_words(review): text = BeautifulSoup(review, "html.parser").get_text() # Remove HTML tags text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower()) # Convert to lower case words = text.split() # Split string into words words = [w for w in words if w not in stopwords.words("english")] # Remove stopwords words = [PorterStemmer().stem(w) for w in words] # stem return words # - review_to_words(train_X[100]) # + import pickle cache_dir = os.path.join("../cache", "sentiment_analysis") # where to store cache files os.makedirs(cache_dir, exist_ok=True) # ensure cache directory exists def preprocess_data(data_train, data_test, labels_train, labels_test, cache_dir=cache_dir, cache_file="preprocessed_data.pkl"): """Convert each review to words; read from cache if available.""" # If cache_file is not None, try to read from it first cache_data = None if cache_file is not None: try: with open(os.path.join(cache_dir, cache_file), "rb") as f: cache_data = pickle.load(f) print("Read preprocessed data from cache file:", cache_file) except: pass # unable to read from cache, but that's okay # If cache is missing, then do the heavy lifting if cache_data is None: # Preprocess training and test data to obtain words for each review #words_train = list(map(review_to_words, data_train)) #words_test = list(map(review_to_words, data_test)) words_train = [review_to_words(review) for review in data_train] words_test = [review_to_words(review) for review in data_test] # Write to cache file for future runs if cache_file is not None: cache_data = dict(words_train=words_train, words_test=words_test, labels_train=labels_train, labels_test=labels_test) with open(os.path.join(cache_dir, cache_file), "wb") as f: pickle.dump(cache_data, f) print("Wrote preprocessed data to cache file:", cache_file) else: # Unpack data loaded from cache file words_train, words_test, labels_train, labels_test = (cache_data['words_train'], cache_data['words_test'], cache_data['labels_train'], cache_data['labels_test']) return words_train, words_test, labels_train, labels_test # - # Preprocess data train_X, test_X, train_y, test_y = preprocess_data(train_X, test_X, train_y, test_y) # ### Extract Bag-of-Words features # # For the model we will be implementing, rather than using the reviews directly, we are going to transform each review into a Bag-of-Words feature representation. Keep in mind that 'in the wild' we will only have access to the training set so our transformer can only use the training set to construct a representation. 
# + import numpy as np from sklearn.feature_extraction.text import CountVectorizer from sklearn.externals import joblib # joblib is an enhanced version of pickle that is more efficient for storing NumPy arrays def extract_BoW_features(words_train, words_test, vocabulary_size=5000, cache_dir=cache_dir, cache_file="bow_features.pkl"): """Extract Bag-of-Words for a given set of documents, already preprocessed into words.""" # If cache_file is not None, try to read from it first cache_data = None if cache_file is not None: try: with open(os.path.join(cache_dir, cache_file), "rb") as f: cache_data = joblib.load(f) print("Read features from cache file:", cache_file) except: pass # unable to read from cache, but that's okay # If cache is missing, then do the heavy lifting if cache_data is None: # Fit a vectorizer to training documents and use it to transform them # NOTE: Training documents have already been preprocessed and tokenized into words; # pass in dummy functions to skip those steps, e.g. preprocessor=lambda x: x vectorizer = CountVectorizer(max_features=vocabulary_size, preprocessor=lambda x: x, tokenizer=lambda x: x) # already preprocessed features_train = vectorizer.fit_transform(words_train).toarray() # Apply the same vectorizer to transform the test documents (ignore unknown words) features_test = vectorizer.transform(words_test).toarray() # NOTE: Remember to convert the features using .toarray() for a compact representation # Write to cache file for future runs (store vocabulary as well) if cache_file is not None: vocabulary = vectorizer.vocabulary_ cache_data = dict(features_train=features_train, features_test=features_test, vocabulary=vocabulary) with open(os.path.join(cache_dir, cache_file), "wb") as f: joblib.dump(cache_data, f) print("Wrote features to cache file:", cache_file) else: # Unpack data loaded from cache file features_train, features_test, vocabulary = (cache_data['features_train'], cache_data['features_test'], cache_data['vocabulary']) # Return both the extracted features as well as the vocabulary return features_train, features_test, vocabulary # - # Extract Bag of Words features for both training and test datasets train_X, test_X, vocabulary = extract_BoW_features(train_X, test_X) len(train_X[100]) # ## Step 4: Classification using XGBoost # # Now that we have created the feature representation of our training (and testing) data, it is time to start setting up and using the XGBoost classifier provided by SageMaker. # # ### Writing the dataset # # The XGBoost classifier that we will be using requires the dataset to be written to a file and stored using Amazon S3. To do this, we will start by splitting the training dataset into two parts, the data we will train the model with and a validation set. Then, we will write those datasets to a file and upload the files to S3. In addition, we will write the test set input to a file and upload the file to S3. This is so that we can use SageMakers Batch Transform functionality to test our model once we've fit it. # + import pandas as pd # Earlier we shuffled the training dataset so to make things simple we can just assign # the first 10 000 reviews to the validation set and use the remaining reviews for training. 
val_X = pd.DataFrame(train_X[:10000]) train_X = pd.DataFrame(train_X[10000:]) val_y = pd.DataFrame(train_y[:10000]) train_y = pd.DataFrame(train_y[10000:]) # - # The documentation for the XGBoost algorithm in SageMaker requires that the saved datasets should contain no headers or index and that for the training and validation data, the label should occur first for each sample. # # For more information about this and other algorithms, the SageMaker developer documentation can be found on __[Amazon's website.](https://docs.aws.amazon.com/sagemaker/latest/dg/)__ # First we make sure that the local directory in which we'd like to store the training and validation csv files exists. data_dir = '../data/sentiment_update' if not os.path.exists(data_dir): os.makedirs(data_dir) # + pd.DataFrame(test_X).to_csv(os.path.join(data_dir, 'test.csv'), header=False, index=False) pd.concat([val_y, val_X], axis=1).to_csv(os.path.join(data_dir, 'validation.csv'), header=False, index=False) pd.concat([train_y, train_X], axis=1).to_csv(os.path.join(data_dir, 'train.csv'), header=False, index=False) # + # To save a bit of memory we can set text_X, train_X, val_X, train_y and val_y to None. test_X = train_X = val_X = train_y = val_y = None # - # ### Uploading Training / Validation files to S3 # # Amazon's S3 service allows us to store files that can be access by both the built-in training models such as the XGBoost model we will be using as well as custom models such as the one we will see a little later. # # For this, and most other tasks we will be doing using SageMaker, there are two methods we could use. The first is to use the low level functionality of SageMaker which requires knowing each of the objects involved in the SageMaker environment. The second is to use the high level functionality in which certain choices have been made on the user's behalf. The low level approach benefits from allowing the user a great deal of flexibility while the high level approach makes development much quicker. For our purposes we will opt to use the high level approach although using the low-level approach is certainly an option. # # Recall the method `upload_data()` which is a member of object representing our current SageMaker session. What this method does is upload the data to the default bucket (which is created if it does not exist) into the path described by the key_prefix variable. To see this for yourself, once you have uploaded the data files, go to the S3 console and look to see where the files have been uploaded. # # For additional resources, see the __[SageMaker API documentation](http://sagemaker.readthedocs.io/en/latest/)__ and in addition the __[SageMaker Developer Guide.](https://docs.aws.amazon.com/sagemaker/latest/dg/)__ # + import sagemaker session = sagemaker.Session() # Store the current SageMaker session # S3 prefix (which folder will we use) prefix = 'sentiment-update' test_location = session.upload_data(os.path.join(data_dir, 'test.csv'), key_prefix=prefix) val_location = session.upload_data(os.path.join(data_dir, 'validation.csv'), key_prefix=prefix) train_location = session.upload_data(os.path.join(data_dir, 'train.csv'), key_prefix=prefix) # - # ### Creating the XGBoost model # # Now that the data has been uploaded it is time to create the XGBoost model. To begin with, we need to do some setup. At this point it is worth discussing what a model is in SageMaker. It is easiest to think of a model of comprising three different objects in the SageMaker ecosystem, which interact with one another. 
# # - Model Artifacts # - Training Code (Container) # - Inference Code (Container) # # The Model Artifacts are what you might think of as the actual model itself. For example, if you were building a neural network, the model artifacts would be the weights of the various layers. In our case, for an XGBoost model, the artifacts are the actual trees that are created during training. # # The other two objects, the training code and the inference code are then used the manipulate the training artifacts. More precisely, the training code uses the training data that is provided and creates the model artifacts, while the inference code uses the model artifacts to make predictions on new data. # # The way that SageMaker runs the training and inference code is by making use of Docker containers. For now, think of a container as being a way of packaging code up so that dependencies aren't an issue. # + from sagemaker import get_execution_role # Our current execution role is require when creating the model as the training # and inference code will need to access the model artifacts. role = get_execution_role() # + # We need to retrieve the location of the container which is provided by Amazon for using XGBoost. # As a matter of convenience, the training and inference code both use the same container. from sagemaker.amazon.amazon_estimator import get_image_uri container = get_image_uri(session.boto_region_name, 'xgboost') # + # First we create a SageMaker estimator object for our model. xgb = sagemaker.estimator.Estimator(container, # The location of the container we wish to use role, # What is our current IAM Role train_instance_count=1, # How many compute instances train_instance_type='ml.m4.xlarge', # What kind of compute instances output_path='s3://{}/{}/output'.format(session.default_bucket(), prefix), sagemaker_session=session) # And then set the algorithm specific parameters. xgb.set_hyperparameters(max_depth=5, eta=0.2, gamma=4, min_child_weight=6, subsample=0.8, silent=0, objective='binary:logistic', early_stopping_rounds=10, num_round=500) # - # ### Fit the XGBoost model # # Now that our model has been set up we simply need to attach the training and validation datasets and then ask SageMaker to set up the computation. s3_input_train = sagemaker.s3_input(s3_data=train_location, content_type='csv') s3_input_validation = sagemaker.s3_input(s3_data=val_location, content_type='csv') xgb.fit({'train': s3_input_train, 'validation': s3_input_validation}) # ### Testing the model # # Now that we've fit our XGBoost model, it's time to see how well it performs. To do this we will use SageMakers Batch Transform functionality. Batch Transform is a convenient way to perform inference on a large dataset in a way that is not realtime. That is, we don't necessarily need to use our model's results immediately and instead we can peform inference on a large number of samples. An example of this in industry might be peforming an end of month report. This method of inference can also be useful to us as it means to can perform inference on our entire test set. # # To perform a Batch Transformation we need to first create a transformer objects from our trained estimator object. xgb_transformer = xgb.transformer(instance_count = 1, instance_type = 'ml.m4.xlarge') # Next we actually perform the transform job. When doing so we need to make sure to specify the type of data we are sending so that it is serialized correctly in the background. In our case we are providing our model with csv data so we specify `text/csv`. 
Also, if the test data that we have provided is too large to process all at once then we need to specify how the data file should be split up. Since each line is a single entry in our data set we tell SageMaker that it can split the input on each line. xgb_transformer.transform(test_location, content_type='text/csv', split_type='Line') # Currently the transform job is running but it is doing so in the background. Since we wish to wait until the transform job is done and we would like a bit of feedback we can run the `wait()` method. xgb_transformer.wait() # Now the transform job has executed and the result, the estimated sentiment of each review, has been saved on S3. Since we would rather work on this file locally we can perform a bit of notebook magic to copy the file to the `data_dir`. # !aws s3 cp --recursive $xgb_transformer.output_path $data_dir # The last step is now to read in the output from our model, convert the output to something a little more usable, in this case we want the sentiment to be either `1` (positive) or `0` (negative), and then compare to the ground truth labels. predictions = pd.read_csv(os.path.join(data_dir, 'test.csv.out'), header=None) predictions = [round(num) for num in predictions.squeeze().values] from sklearn.metrics import accuracy_score accuracy_score(test_y, predictions) # ## Step 5: Looking at New Data # # So now we have an XGBoost sentiment analysis model that we believe is working pretty well. As a result, we deployed it and we are using it in some sort of app. # # However, as we allow users to use our app we periodically record submitted movie reviews so that we can perform some quality control on our deployed model. Once we've accumulated enough reviews we go through them by hand and evaluate whether they are positive or negative (there are many ways you might do this in practice aside from by hand). The reason for doing this is so that we can check to see how well our model is doing. # + import new_data new_X, new_Y = new_data.get_new_data() # - # **NOTE:** Part of the fun in this notebook is trying to figure out what exactly is happening with the new data, so try not to cheat by looking in the `new_data` module. Also, the `new_data` module assumes that the cache created earlier in Step 3 is still stored in `../cache/sentiment_analysis`. # ### (TODO) Testing the current model # # Now that we've loaded the new data, let's check to see how our current XGBoost model performs on it. # # First, note that the data that has been loaded has already been pre-processed so that each entry in `new_X` is a list of words that have been processed using `nltk`. However, we have not yet constructed the bag of words encoding, which we will do now. # # First, we use the vocabulary that we constructed earlier using the original training data to construct a `CountVectorizer` which we will use to transform our new data into its bag of words encoding. # # **TODO:** Create the CountVectorizer object using the vocabulary created earlier and use it to transform the new data. # + # TODO: Create the CountVectorizer using the previously constructed vocabulary # vectorizer = None # Solution: vectorizer = CountVectorizer(vocabulary=vocabulary, preprocessor=lambda x: x, tokenizer=lambda x: x) # TODO: Transform our new data set and store the transformed data in the variable new_XV # new_XV = None # Solution new_XV = vectorizer.transform(new_X).toarray() # - # As a quick sanity check, we make sure that the length of each of our bag of words encoded reviews is correct. 
In particular, it must be the same size as the vocabulary which in our case is `5000`. len(new_XV[100]) # Now that we've performed the data processing that is required by our model we can save it locally and then upload it to S3 so that we can construct a batch transform job in order to see how well our model is working. # # First, we save the data locally. # # **TODO:** Save the new data (after it has been transformed using the original vocabulary) to the local notebook instance. # + # TODO: Save the data contained in new_XV locally in the data_dir with the file name new_data.csv # Solution: pd.DataFrame(new_XV).to_csv(os.path.join(data_dir, 'new_data.csv'), header=False, index=False) # - # Next, we upload the data to S3. # # **TODO:** Upload the csv file created above to S3. # + # TODO: Upload the new_data.csv file contained in the data_dir folder to S3 and save the resulting # URI as new_data_location # new_data_location = None # Solution: new_data_location = session.upload_data(os.path.join(data_dir, 'new_data.csv'), key_prefix=prefix) # - # Then, once the new data has been uploaded to S3, we create and run the batch transform job to get our model's predictions about the sentiment of the new movie reviews. # # **TODO:** Using the `xgb_transformer` object that was created earlier (at the end of Step 4 to test the XGBoost model), transform the data located at `new_data_location`. # + # TODO: Using xgb_transformer, transform the new_data_location data. You may wish to **wait** until # the batch transform job has finished. # Solution: xgb_transformer.transform(new_data_location, content_type='text/csv', split_type='Line') xgb_transformer.wait() # - # As usual, we copy the results of the batch transform job to our local instance. # !aws s3 cp --recursive $xgb_transformer.output_path $data_dir # Read in the results of the batch transform job. predictions = pd.read_csv(os.path.join(data_dir, 'new_data.csv.out'), header=None) predictions = [round(num) for num in predictions.squeeze().values] # And check the accuracy of our current model. accuracy_score(new_Y, predictions) # So it would appear that *something* has changed since our model is no longer (as) effective at determining the sentiment of a user provided review. # # In a real life scenario you would check a number of different things to see what exactly is going on. In our case, we are only going to check one and that is whether some aspect of the underlying distribution has changed. In other words, we want to see if the words that appear in our new collection of reviews matches the words that appear in the original training set. Of course, we want to narrow our scope a little bit so we will only look at the `5000` most frequently appearing words in each data set, or in other words, the vocabulary generated by each data set. # # Before doing that, however, let's take a look at some of the incorrectly classified reviews in the new data set. # # To start, we will deploy the original XGBoost model. We will then use the deployed model to infer the sentiment of some of the new reviews. This will also serve as a nice excuse to deploy our model so that we can mimic a real life scenario where we have a model that has been deployed and is being used in production. # # **TODO:** Deploy the XGBoost model. # + # TODO: Deploy the model that was created earlier. Recall that the object name is 'xgb'. 
# xgb_predictor = None # Solution: xgb_predictor = xgb.deploy(initial_instance_count = 1, instance_type = 'ml.m4.xlarge') # - # ### Diagnose the problem # # Now that we have our deployed "production" model, we can send some of our new data to it and filter out some of the incorrectly classified reviews. # + from sagemaker.predictor import csv_serializer # We need to tell the endpoint what format the data we are sending is in so that SageMaker can perform the serialization. xgb_predictor.content_type = 'text/csv' xgb_predictor.serializer = csv_serializer # - # It will be useful to look at a few different examples of incorrectly classified reviews so we will start by creating a *generator* which we will use to iterate through some of the new reviews and find ones that are incorrect. # # **NOTE:** Understanding what Python generators are isn't really required for this module. The reason we use them here is so that we don't have to iterate through all of the new reviews, searching for incorrectly classified samples. def get_sample(in_X, in_XV, in_Y): for idx, smp in enumerate(in_X): res = round(float(xgb_predictor.predict(in_XV[idx]))) if res != in_Y[idx]: yield smp, in_Y[idx] gn = get_sample(new_X, new_XV, new_Y) # At this point, `gn` is the *generator* which generates samples from the new data set which are not classified correctly. To get the *next* sample we simply call the `next` method on our generator. print(next(gn)) # After looking at a few examples, maybe we decide to look at the most frequently appearing `5000` words in each data set, the original training data set and the new data set. The reason for looking at this might be that we expect the frequency of use of different words to have changed, maybe there is some new slang that has been introduced or some other artifact of popular culture that has changed the way that people write movie reviews. # # To do this, we start by fitting a `CountVectorizer` to the new data. new_vectorizer = CountVectorizer(max_features=5000, preprocessor=lambda x: x, tokenizer=lambda x: x) new_vectorizer.fit(new_X) # Now that we have this new `CountVectorizer` object, we can check to see if the corresponding vocabulary has changed between the two data sets. original_vocabulary = set(vocabulary.keys()) new_vocabulary = set(new_vectorizer.vocabulary_.keys()) # We can look at the words that were in the original vocabulary but not in the new vocabulary. print(original_vocabulary - new_vocabulary) # And similarly, we can look at the words that are in the new vocabulary but which were not in the original vocabulary. print(new_vocabulary - original_vocabulary) # These words themselves don't tell us much; however, if one of these words occurred with a large frequency, that might tell us something. In particular, we wouldn't really expect any of the words above to appear with too much frequency. # # **Question:** What exactly is going on here? Not only which (if any) words appear with a larger than expected frequency, but also, what does this mean? What has changed about the world that our original model no longer takes into account? # # **NOTE:** This is meant to be a very open-ended question. To investigate you may need more cells than the one provided below. Also, there isn't really a *correct* answer, this is meant to be an opportunity to explore the data. # ### (TODO) Build a new model # # Supposing that we believe something has changed about the underlying distribution of the words that our reviews are made up of, we need to create a new model.
This way our new model will take into account whatever it is that has changed. # # To begin with, we will use the new vocabulary to create a bag of words encoding of the new data. We will then use this data to train a new XGBoost model. # # **NOTE:** Because we believe that the underlying distribution of words has changed it should follow that the original vocabulary that we used to construct a bag of words encoding of the reviews is no longer valid. This means that we need to be careful with our data. If we send an bag of words encoded review using the *original* vocabulary we should not expect any sort of meaningful results. # # In particular, this means that if we had deployed our XGBoost model like we did in the Web App notebook then we would need to implement this vocabulary change in the Lambda function as well. new_XV = new_vectorizer.transform(new_X).toarray() # And a quick check to make sure that the newly encoded reviews have the correct length, which should be the size of the new vocabulary which we created. len(new_XV[0]) # Now that we have our newly encoded, newly collected data, we can split it up into a training and validation set so that we can train a new XGBoost model. As usual, we first split up the data, then save it locally and then upload it to S3. # + import pandas as pd # Earlier we shuffled the training dataset so to make things simple we can just assign # the first 10 000 reviews to the validation set and use the remaining reviews for training. new_val_X = pd.DataFrame(new_XV[:10000]) new_train_X = pd.DataFrame(new_XV[10000:]) new_val_y = pd.DataFrame(new_Y[:10000]) new_train_y = pd.DataFrame(new_Y[10000:]) # - # In order to save some memory we will effectively delete the `new_X` variable. Remember that this contained a list of reviews and each review was a list of words. Note that once this cell has been executed you will need to read the new data in again if you want to work with it. new_X = None # Next we save the new training and validation sets locally. Note that we overwrite the training and validation sets used earlier. This is mostly because the amount of space that we have available on our notebook instance is limited. Of course, you can increase this if you'd like but to do so may increase the cost of running the notebook instance. # + pd.DataFrame(new_XV).to_csv(os.path.join(data_dir, 'new_data.csv'), header=False, index=False) pd.concat([new_val_y, new_val_X], axis=1).to_csv(os.path.join(data_dir, 'new_validation.csv'), header=False, index=False) pd.concat([new_train_y, new_train_X], axis=1).to_csv(os.path.join(data_dir, 'new_train.csv'), header=False, index=False) # - # Now that we've saved our data to the local instance, we can safely delete the variables to save on memory. new_val_y = new_val_X = new_train_y = new_train_X = new_XV = None # Lastly, we make sure to upload the new training and validation sets to S3. # # **TODO:** Upload the new data as well as the new training and validation data sets to S3. # + # TODO: Upload the new data and the new validation.csv and train.csv files in the data_dir directory to S3. 
# new_data_location = None # new_val_location = None # new_train_location = None # Solution: new_data_location = session.upload_data(os.path.join(data_dir, 'new_data.csv'), key_prefix=prefix) new_val_location = session.upload_data(os.path.join(data_dir, 'new_validation.csv'), key_prefix=prefix) new_train_location = session.upload_data(os.path.join(data_dir, 'new_train.csv'), key_prefix=prefix) # - # Once our new training data has been uploaded to S3, we can create a new XGBoost model that will take into account the changes that have occured in our data set. # # **TODO:** Create a new XGBoost estimator object. # + # TODO: First, create a SageMaker estimator object for our model. # new_xgb = None # Solution: new_xgb = sagemaker.estimator.Estimator(container, # The location of the container we wish to use role, # What is our current IAM Role train_instance_count=1, # How many compute instances train_instance_type='ml.m4.xlarge', # What kind of compute instances output_path='s3://{}/{}/output'.format(session.default_bucket(), prefix), sagemaker_session=session) # TODO: Then set the algorithm specific parameters. You may wish to use the same parameters that were # used when training the original model. # Solution: new_xgb.set_hyperparameters(max_depth=5, eta=0.2, gamma=4, min_child_weight=6, subsample=0.8, silent=0, objective='binary:logistic', early_stopping_rounds=10, num_round=500) # - # Once the model has been created, we can train it with our new data. # # **TODO:** Train the new XGBoost model. # + # TODO: First, make sure that you create s3 input objects so that SageMaker knows where to # find the training and validation data. s3_new_input_train = None s3_new_input_validation = None # Solution: s3_new_input_train = sagemaker.s3_input(s3_data=new_train_location, content_type='csv') s3_new_input_validation = sagemaker.s3_input(s3_data=new_val_location, content_type='csv') # + # TODO: Using the new validation and training data, 'fit' your new model. # Solution: new_xgb.fit({'train': s3_new_input_train, 'validation': s3_new_input_validation}) # - # ### (TODO) Check the new model # # So now we have a new XGBoost model that we believe more accurately represents the state of the world at this time, at least in how it relates to the sentiment analysis problem that we are working on. The next step is to double check that our model is performing reasonably. # # To do this, we will first test our model on the new data. # # **Note:** In practice this is a pretty bad idea. We already trained our model on the new data, so testing it shouldn't really tell us much. In fact, this is sort of a textbook example of leakage. We are only doing it here so that we have a numerical baseline. # # **Question:** How might you address the leakage problem? # First, we create a new transformer based on our new XGBoost model. # # **TODO:** Create a transformer object from the newly created XGBoost model. # + # TODO: Create a transformer object from the new_xgb model # new_xgb_transformer = None # Solution: new_xgb_transformer = new_xgb.transformer(instance_count = 1, instance_type = 'ml.m4.xlarge') # - # Next we test our model on the new data. # # **TODO:** Use the transformer object to transform the new data (stored in the `new_data_location` variable) # + # TODO: Using new_xgb_transformer, transform the new_data_location data. You may wish to # 'wait' for the transform job to finish. 
# Solution: new_xgb_transformer.transform(new_data_location, content_type='text/csv', split_type='Line') new_xgb_transformer.wait() # - # Copy the results to our local instance. # !aws s3 cp --recursive $new_xgb_transformer.output_path $data_dir # And see how well the model did. predictions = pd.read_csv(os.path.join(data_dir, 'new_data.csv.out'), header=None) predictions = [round(num) for num in predictions.squeeze().values] accuracy_score(new_Y, predictions) # As expected, since we trained the model on this data, our model performs pretty well. So, we have reason to believe that our new XGBoost model is a "better" model. # # However, before we start changing our deployed model, we should first make sure that our new model isn't too different. In other words, if our new model performed really poorly on the original test data then this might be an indication that something else has gone wrong. # # To start with, since we got rid of the variable that stored the original test reviews, we will read them in again from the cache that we created in Step 3. Note that we need to make sure that we read in the original test data after it has been pre-processed with `nltk` but before it has been bag of words encoded. This is because we need to use the new vocabulary instead of the original one. # + cache_data = None with open(os.path.join(cache_dir, "preprocessed_data.pkl"), "rb") as f: cache_data = pickle.load(f) print("Read preprocessed data from cache file:", "preprocessed_data.pkl") test_X = cache_data['words_test'] test_Y = cache_data['labels_test'] # Here we set cache_data to None so that it doesn't occupy memory cache_data = None # - # Once we've loaded the original test reviews, we need to create a bag of words encoding of them using the new vocabulary that we created, based on the new data. # # **TODO:** Transform the original test data using the new vocabulary. # + # TODO: Use the new_vectorizer object that you created earlier to transform the test_X data. # test_X = None # Solution: test_X = new_vectorizer.transform(test_X).toarray() # - # Now that we have correctly encoded the original test data, we can write it to the local instance, upload it to S3 and test it. pd.DataFrame(test_X).to_csv(os.path.join(data_dir, 'test.csv'), header=False, index=False) test_location = session.upload_data(os.path.join(data_dir, 'test.csv'), key_prefix=prefix) new_xgb_transformer.transform(test_location, content_type='text/csv', split_type='Line') new_xgb_transformer.wait() # !aws s3 cp --recursive $new_xgb_transformer.output_path $data_dir predictions = pd.read_csv(os.path.join(data_dir, 'test.csv.out'), header=None) predictions = [round(num) for num in predictions.squeeze().values] accuracy_score(test_Y, predictions) # It would appear that our new XGBoost model is performing quite well on the old test data. This gives us some indication that our new model should be put into production and replace our original model. # ## Step 6: (TODO) Updating the Model # # So we have a new model that we'd like to use instead of one that is already deployed. Furthermore, we are assuming that the model that is already deployed is being used in some sort of application. As a result, what we want to do is update the existing endpoint so that it uses our new model. # # Of course, to do this we need to create an endpoint configuration for our newly created model. # # First, note that we can access the name of the model that we created above using the `model_name` property of the transformer. 
The reason for this is that in order for the transformer to create a batch transform job it needs to first create the model object inside of SageMaker. Since we've sort of already done this we should take advantage of it. new_xgb_transformer.model_name # Next, we create an endpoint configuration using the low level approach of creating the dictionary object which describes the endpoint configuration we want. # # **TODO:** Using the low level approach, create a new endpoint configuration. Don't forget that it needs a name and that the name needs to be unique. If you get stuck, try looking at the Boston Housing Low Level Deployment tutorial notebook. # + from time import gmtime, strftime # TODO: Give our endpoint configuration a name. Remember, it needs to be unique. # new_xgb_endpoint_config_name = None # Solution: new_xgb_endpoint_config_name = "sentiment-update-xgboost-endpoint-config-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) # TODO: Using the SageMaker Client, construct the endpoint configuration. # new_xgb_endpoint_config_info = None # Solution: new_xgb_endpoint_config_info = session.sagemaker_client.create_endpoint_config( EndpointConfigName = new_xgb_endpoint_config_name, ProductionVariants = [{ "InstanceType": "ml.m4.xlarge", "InitialVariantWeight": 1, "InitialInstanceCount": 1, "ModelName": new_xgb_transformer.model_name, "VariantName": "XGB-Model" }]) # - # Once the endpoint configuration has been constructed, it is a straightforward matter to ask SageMaker to update the existing endpoint so that it uses the new endpoint configuration. # # Of note here is that SageMaker does this in such a way that there is no downtime. Essentially, SageMaker deploys the new model and then updates the original endpoint so that it points to the newly deployed model. After that, the original model is shut down. This way, whatever app is using our endpoint won't notice that we've changed the model that is being used. # # **TODO:** Use the SageMaker Client to update the endpoint that you deployed earlier. # + # TODO: Update the xgb_predictor.endpoint so that it uses new_xgb_endpoint_config_name. # Solution: session.sagemaker_client.update_endpoint(EndpointName=xgb_predictor.endpoint, EndpointConfigName=new_xgb_endpoint_config_name) # - # And, as is generally the case with SageMaker requests, this is being done in the background so if we want to wait for it to complete we need to call the appropriate method. session.wait_for_endpoint(xgb_predictor.endpoint) # ## Step 7: Delete the Endpoint # # Of course, since we are done with the deployed endpoint we need to make sure to shut it down, otherwise we will continue to be charged for it. xgb_predictor.delete_endpoint() # ## Some Additional Questions # # This notebook is a little different from the other notebooks in this module. In part, this is because it is meant to be a little bit closer to the type of problem you may face in a real world scenario. Of course, this problem is a very easy one with a prescribed solution, but there are many other interesting questions that we did not consider here and that you may wish to consider yourself. # # For example, # - What other ways could the underlying distribution change? # - Is it a good idea to re-train the model using only the new data? # - What would change if the quantity of new data wasn't large. Say you only received 500 samples? # # ## Optional: Clean up # # The default notebook instance on SageMaker doesn't have a lot of excess disk space available. 
As you continue to complete and execute notebooks you will eventually fill up this disk space, leading to errors which can be difficult to diagnose. Once you are completely finished using a notebook it is a good idea to remove the files that you created along the way. Of course, you can do this from the terminal or from the notebook hub if you would like. The cell below contains some commands to clean up the created files from within the notebook. # + # First we will remove all of the files contained in the data_dir directory # !rm $data_dir/* # And then we delete the directory itself # !rmdir $data_dir # Similarly we will remove the files in the cache_dir directory and the directory itself # !rm $cache_dir/* # !rmdir $cache_dir # -
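# As a follow-up to the leakage question raised in Step 5, one simple mitigation is to carve a hold-out split out of the newly collected reviews *before* any retraining happens, and to report the new model's accuracy only on that hold-out. The cell below is a minimal sketch of that idea; it is not part of the original solution and assumes `new_XV` (the bag of words encoded reviews) and `new_Y` (the labels) are still in memory.

# +
# Minimal sketch (assumption: new_XV and new_Y are still loaded) of avoiding leakage:
# reserve a slice of the new data that the retrained model never sees during training.
from sklearn.model_selection import train_test_split

new_train_val_X, new_holdout_X, new_train_val_y, new_holdout_y = train_test_split(
    new_XV, new_Y, test_size=0.2, random_state=42)

# Train and validate only on new_train_val_*, then quote accuracy on the hold-out split.
print(len(new_train_val_X), len(new_holdout_X))
# -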
Mini-Projects/IMDB Sentiment Analysis - XGBoost (Updating a Model) - Solution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os import astropy import numpy as np from astropy.table import Table from astropy.table import Column import glob import matplotlib.pyplot as plt import pandas as pd from collections import Counter from mpl_toolkits.mplot3d import Axes3D moe = .5 sample_location = "/home/hallflower/sample/spectra/" dash = "/mnt/c/users/20xha/Documents/Caltech/Research/DASH/" SEDM_ML_sample = Table.read("/mnt/c/Users/20xha/Documents/Caltech/Research/SEDM_ML_sample.ascii", format = "ascii") SEDM_ML_sample.rename_column('col1', 'ZTF_Name') SEDM_ML_sample.rename_column('col2', "Class") SEDM_ML_sample.rename_column('col8', "Version") snidoutput = np.load("/mnt/c/Users/20xha/Documents/Caltech/Research/SNID_results_rlap3.npy", allow_pickle=True) final_rcf_table = Table.from_pandas(pd.read_hdf("/mnt/c/users/20xha/Documents/Caltech/Research/final_rcf_table.h5")) final_rcf_table[0] # + ranges = np.linspace(0, 25, 101) z_output_snid = [] count = 0 for i in ranges: ZTable = Table( names=("ZTF_Name", "z_snid", "z_snid_err" ), meta={"name": "Spectrum Results after SNID"}, dtype=("U64", "float64", "float64" ) ) for j in snidoutput: row = [] row.append(j[0]) if(np.max(j[1]["rlap"]) > i): good = j[1][np.where(j[1]["grade"] == "good")] if(len(good) != 0): row.append(float(np.mean(good[0]["z"]))) row.append(float(np.sqrt(np.mean(good[0]["zerr"] ** 2)))) ZTable.add_row(row) count += 1 if(len(ZTable) != 0): z_output_snid.append([i,ZTable]) if(count% 100 == 0): print(count) z_output_snid = np.asarray(z_output_snid) # - z_joinedoutput_snid = [] for i in z_output_snid: JoinedResults = astropy.table.join(i[1], final_rcf_table) z_joinedoutput_snid.append([i[0], JoinedResults]) z_joinedoutput_snid = np.asarray(z_joinedoutput_snid) z_accuracy_snid = [] for table in z_joinedoutput_snid: right = 0 wrong = [] for row in table[1]: zerror = np.abs((row["z_snid"] - row["z_host"])/row["z_host"]) if(zerror < moe): right += 1 else: wrong.append(np.asarray([row["z_snid"], row["z_snid_err"], row["z_sn"]])) wrong = np.asarray(wrong) z_accuracy_snid.append([table[0],right,len(table[1]),wrong]) z_accuracy_snid = np.asarray(z_accuracy_snid) plt.scatter(z_accuracy_snid[:,0], z_accuracy_snid[:,1]/z_accuracy_snid[:,2]) dashoutput = np.load(dash+"output.npy",allow_pickle=True) # + ranges = np.linspace(0, 25, 101) z_output_dash = [] count = 0 for rlap in ranges: ZTable = Table( names=("ZTF_Name", "z_snid", "z_snid_err" ), meta={"name": "Spectrum Results after SNID"}, dtype=("U64", "float64", "float64" ) ) for i in dashoutput: row = [] row.append(i[-1]) rlap_list = [] for rlap_vals in np.asarray(i[3]): rlap_list.append(float(rlap_vals.split(":")[-1])) best_rlap = np.max(rlap_list) if(best_rlap > rlap): zlist = i[1] row.append(float((i[1][0]))) row.append(float(np.std(i[1]))) ZTable.add_row(row) count += 1 if(len(ZTable) != 0): z_output_dash.append([rlap,ZTable]) if(count% 100 == 0): print(count) z_output_dash = np.asarray(z_output_dash) # - z_joinedoutput_dash = [] for i in z_output_dash: JoinedResults = astropy.table.join(i[1], final_rcf_table) z_joinedoutput_dash.append([i[0], JoinedResults]) z_joinedoutput_dash = np.asarray(z_joinedoutput_dash) z_accuracy_dash = [] for table in z_joinedoutput_dash: right = 0 wrong = [] if(len(table[1]) != 0): for row in table[1]: zsnid = row["z_snid"] zactual = row["z_sn"] zerror = 
np.abs((row["z_snid"] - row["z_host"])/row["z_host"]) if(zerror < moe): right += 1 else: wrong.append(np.asarray([row["z_snid"], row["z_snid_err"], row["z_sn"]])) wrong = np.asarray(wrong) z_accuracy_dash.append([table[0],right,len(table[1]),wrong]) z_accuracy_dash = np.asarray(z_accuracy_dash) plt.scatter(z_accuracy_dash[:,0], z_accuracy_dash[:,1]/z_accuracy_dash[:,2]) right_actual = 0 wrong = [] for row in final_rcf_table: zerror = np.abs((row["z_sn"] - row["z_host"])/row["z_host"]) if(zerror < moe): right_actual += 1 else: wrong.append(np.asarray([row["z_sn"], row["z_host"]])) wrong = np.asarray(wrong) fig = plt.figure(figsize = (20,10)) plt.title("Accuracy of Redshift") plt.xlabel("rlap") plt.ylabel("Accuracy") plt.scatter(z_accuracy_snid[:,0], z_accuracy_snid[:,1]/z_accuracy_snid[:,2], color = "blue") plt.scatter(z_accuracy_dash[:,0], z_accuracy_dash[:,1]/z_accuracy_dash[:,2], color = "red") plt.axhline(y=right_actual/len(final_rcf_table), color='green', linestyle='-') final_rcf_table[0] z_joinedoutput_snid[0][1][0] plt.scatter(z_joinedoutput_dash[0][1]["z_host"], z_joinedoutput_dash[0][1]["z_snid"]) plt.title("Host Galaxy vs SNID") plt.xlabel("Host Galaxy Redshift") plt.ylabel("SNID Redshift") plt.xlim(0,.2) plt.ylim(-.01,.2) from sklearn import datasets, linear_model from sklearn.metrics import mean_squared_error, r2_score regr = linear_model.LinearRegression(fit_intercept = False) z_joinedoutput_snid_good = z_joinedoutput_snid[0][1][np.where(z_joinedoutput_snid[0][1]["z_host"] > -.1)[0]] regr.fit(z_joinedoutput_snid_good["z_host"].reshape(-1, 1), z_joinedoutput_snid_good["z_snid"].reshape(-1, 1)) ranges = np.linspace(np.min(z_joinedoutput_snid_good["z_host"]), np.max(z_joinedoutput_snid_good["z_host"]), 100) ypredict = regr.predict(ranges.reshape(-1, 1)) plt.errorbar(z_joinedoutput_snid_good["z_host"], z_joinedoutput_snid_good["z_snid"], z_joinedoutput_snid_good["z_snid_err"], fmt = "o", color = "blue") plt.title("Host Galaxy vs SNID (m = " + str(regr.coef_[0][0]) + ")") plt.xlabel("Host Galaxy Redshift") plt.ylabel("SNID Redshift") plt.plot(ranges, ypredict, color = "green") plt.plot(ranges, ranges, color = "black") regr_dash = linear_model.LinearRegression(fit_intercept = False) z_joinedoutput_dash_good = z_joinedoutput_dash[0][1][np.where(z_joinedoutput_dash[0][1]["z_host"] > -.1)[0]] regr_dash.fit(z_joinedoutput_dash_good["z_host"].reshape(-1, 1), z_joinedoutput_dash_good["z_snid"].reshape(-1, 1)) ranges_dash = np.linspace(np.min(z_joinedoutput_dash_good["z_host"]), np.max(z_joinedoutput_dash_good["z_host"]), 100) ypredict_dash = regr_dash.predict(ranges.reshape(-1, 1)) plt.scatter(z_joinedoutput_dash_good["z_host"], z_joinedoutput_dash_good["z_snid"], color = "red") plt.plot(ranges, ypredict, color = "green") plt.plot(ranges, ranges, color = "black") plt.title("Host Galaxy vs Dash (m = " + str(regr_dash.coef_[0][0]) + ")") plt.xlabel("Host Galaxy Redshift") plt.ylabel("Dash Redshift") plt.hist((z_joinedoutput_dash_good["z_snid"] - z_joinedoutput_dash_good["z_host"]), color = "red") plt.xlim(.1) plt.ylim(0,20) plt.xlabel("Dash Redshift - Galaxy Redshift") plt.title("Dash Residuals (deltaZ > 0.1)") plt.hist((z_joinedoutput_snid_good["z_snid"] - z_joinedoutput_snid_good["z_host"]), color = "blue") plt.xlim(.1) plt.ylim(0,8) plt.xlabel("SNID Redshift - Galaxy Redshift") plt.title("SNID Residuals (deltaZ > 0.1)") plt.hist((z_joinedoutput_dash_good["z_snid"] - z_joinedoutput_dash_good["z_host"])/z_joinedoutput_dash_good["z_host"], color = "red", bins = 20) plt.xlim(2) plt.ylim(0,20) 
plt.xlabel("(Dash Redshift - Galaxy Redshift) / Galaxy Redshift") plt.title("Dash delta Z ratio (> 2)") plt.hist((z_joinedoutput_snid_good["z_snid"] - z_joinedoutput_snid_good["z_host"])/z_joinedoutput_snid_good["z_host"], color = "blue", bins = 20) plt.xlim(2) plt.ylim(0,4) plt.xlabel("(SNID Redshift - Galaxy Redshift) / Galaxy Redshift") plt.title("SNID delta Z ratio (> 2)")
RedShift/SNIDRedshift.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # ## Using Python to Query MongoDB # This notebook demonstrates additional MongoDB querying techniques using the **pymongo** library. As its name implies, pymongo is the MongoDB library for Python, and its **documentation** can be found here: https://pymongo.readthedocs.io/en/stable/index.html # # ### 1.0. Prerequisites # This demonstration uses an instance of **MongoDB Atlas** *(the MongoDB cloud service)*; therefore, you must first create a **free** *(Shared)* instance of that service. This can be accomplished by following the instructions at: https://docs.atlas.mongodb.com/tutorial/create-new-cluster/. # # If you prefer to use a local instance of MongoDB then you will have to import the **trips.json** file to create the collection we will be working with. This can either be accomplished using **MongoDB Compass**, or with sample code in the **06-Python-MongoDB-ETL** notebook. # # #### 1.1. Install the *pymongo* library into your *python* environment by executing the following command in a *Terminal window*: # - python -m pip install pymongo[srv] # # #### 1.2. Import the libraries that you'll be working with in the notebook. import os import datetime import pymongo import pandas as pd # ### 2.0. Connecting to the MongoDB Instance # + host_name = "localhost" port = "27017" atlas_cluster_name = "sandbox" atlas_default_dbname = "sample_airbnb" atlas_user_name = "m001-student" atlas_password = "<PASSWORD>" conn_str = {"local" : f"mongodb://{host_name}:{port}/", "atlas" : f"mongodb+srv://{atlas_user_name}:{atlas_password}@{atlas_<EMAIL>}.<EMAIL>/{atlas_default_dbname}?retryWrites=true&w=majority" } # - # #### 2.1. Interrogate the MongoDB Atlas instance for the databases it hosts. client = pymongo.MongoClient(conn_str["atlas"]) client.list_database_names() # #### 2.2. Connect to the "*sample_training*" database, and interrogate it for the collections it contains. # + db_name = "sample_training" db = client[db_name] db.list_collection_names() # - # #### 2.3. Connect to the **trips** collection where we will be exploring a variety of querying techniques. # For example, the following query makes use of the **find_one()** method to select the first document in the collection for the purpose of inspecting the structure and contents of a sample document. Because each document may have a different schema, this single document can only give us a partial understanding of what the collection may contain. Notice that passing the *collection name* to the *database* object reference **db[ ]** returns a reference to the *collection* object. # + collection = "trips" trips = db[collection] trips.find_one() # - # ### 3.0. Using the MongoDB Query Language (MQL) # # The **find()** method returns a **cursor** containing all documents from the **collection** that match the filtering **conditions** that were provided. A **cursor** is required to *iterate* over the results because MongoDB manages **collections of documents** that contain **fields** rather than **tables of rows** that contain **columns** as we saw when studying relational database management systems like Microsoft SQL Server, Oracle and MySQL. # # #### 3.1. Specifying Conditions and Projections # When querying MongoDB, the **find()** method of the **collection** object accepts two possible parameters.
First, one or more **conditions** are used to *filter* or restrict the documents that are returned. Second, and optionally, a **projection** can be defined to control which **fields** that are returned. The **conditions** are the equivalent of a SQL query's *ON, WHERE* and *HAVING* clauses, and the **projection** is the equivalent of a SQL query's *SELECT* list. # # The MongoDB (JSON) query syntax includes numerous conditional operators, all of which begin with the **\$** character (e.g., **\$lt** *(less than)*, **\$gt** *(greater than)*, **\$lte** *(less than or equal to)*, **\$gte** *(greater than or equal to)*). These operators can be used either alone or in concert with one another to perform exact matches and/or range matches. # # For example, the following query **excludes** the *_id* field and **includes** the *tripduration, bikeid and birth year* fields where the **tripduration** is *greater than* 90 seconds and *less than* 100 seconds, and the **birth year** is greater than or equal to *1970*. The results are then **sorted** by **trip duration** in descending order. # + # The SELECT list ----------------------------------------------- projection = {"_id": 0, "tripduration": 1, "bikeid": 1, "birth year": 1} # The WHERE clause ---------------------------------------------- conditions = {"tripduration":{"$gt": 90, "$lt": 100}, "birth year":{"$gte": 1970}} # The ORDER BY clause ------------------------------------------- orderby = [("tripduration", -1)] for trip in trips.find(conditions, projection).sort(orderby): print(trip) # - # ##### 3.1.1. Using the Pandas DataFrame # To make interacting with the *collection of documents* that are returned by the **find()** method much easier, we can use the Python **list()** method to *package* each document returned by the cursor into a Python **list** object that can then be passed to the *Pandas* **DataFrame()** constructor. This technique is very usefull for interacting with document collections having a common subset of fields available for **projection**. df = pd.DataFrame( list( db.trips.find(conditions, projection).sort(orderby) ) ) df # #### 3.1.2. Using Logical Operators # In structuring a list of **conditions**, it is implicit that the conditions are **cumulative**. In other words, each conditional expression builds upon all former conditions using **AND** logical operation. It is also possible to express **OR** logical operation using either the **\$in**, or **\$or** operators. # # First, the **\$in** operator functions identically to the **IN** operator of the Structured Query Language (SQL) that's used to interact with relational database management systems like Microsoft SQL Server, Oracle, MySQL and PostgreSQL in that its functionality enables matching multiple values for a single key (field). In the following query, all documents are returned where the **birth year** field contains either the value **1936, 1939** *or* **1943**. # + conditions = {"birth year" : {"$in" : [1936, 1939, 1943]}} df = pd.DataFrame( list(db.trips.find(conditions, projection).sort(orderby)) ) df # - # Conversely, the **\$nin** operator is used to express **NOT IN** logical operation. The following query returns all documents where the **birth year** field contains any values other than *1960, 1970* **or** *1980*. Also, here we rely on the **head()** function of the Pandas DataFrame object to specify the number of documents to return from the top *(head)* of the result-set; the default number of rows is 5. 
# + conditions = {"birth year" : {"$nin" : [1960, 1970, 1980]}} df = pd.DataFrame( list(db.trips.find(conditions, projection).sort(orderby) ) ) df.head() # - # Where it becomes necessary to match values regarding multiple keys (fields), the **\$or** operator can be used in a manner that's identical to the **OR** operator of the **SQL** language. The following query returns all documents where the **birth year** field contains the value *1988* **OR** the **start station id** field contains the value *270*. We also illustrate the **limit()** function being used to return a specified number of documents from the **top** of the result-set. # + projection = {"_id": 0, "start station id": 1, "birth year": 1, "tripduration": 1} conditions = {"$or" : [{"birth year" : 1988}, {"start station id" : 270}]} num_rows = 7 df = pd.DataFrame( list(db.trips.find(conditions, projection).sort(orderby).limit(num_rows) ) ) df # - # What's more, the **\$not** metaconditional operator can be used in concert with many other conditionals for the sake of *negating* the expression. # + conditions = {"birth year" : {"$not" : {"$in" : [1960, 1965, 1970, 1975, 1980]}}} projection = {"_id": 0, "usertype": 1, "birth year": 1} df = pd.DataFrame( list(db.trips.find(conditions, projection).sort(orderby).limit(num_rows) ) ) df # - # ### 4.0. Using the MongoDB Aggregation Framework # The aggregation framework enables using a *pipeline* construct where the result of each element is passed to the next. # # #### 4.1. The Match and Project Stages: # In our first task we illustrate simply duplicating the behavior of the *MongoDB Query Language (MQL)* queries we've already seen. The following cell demonstrates how the **\$project** operator works in concert with the **\$match** operator to return the same results as an MQL query that specifies returning the **start station id** and **birth year** fields **where** the **birth year** is equal to **1941**. # + df = pd.DataFrame( list( db.trips.aggregate([ {"$project": {"start station id": 1, "birth year": 1, "_id": 0}}, {"$match": {"birth year": 1941}} ]) )) df # - # #### 4.2. The Group Stage # While the code listing above doesn't illustrate the power of the aggregation framework, the following demonstrates how the aggregation framework enables **grouping** document collections by specific criteria. # - In the first example below we demonstrate how to enumerate all the unique values in the **birth year** field greater than or equal to 1990. # - Then we show how to calculate the **count** of documents **having** the same **birth year**, returning only the **top 10 birth years** with the greatest **count**. # + df = pd.DataFrame( list( db.trips.aggregate([ {"$project": {"birth year": 1, "_id": 0}}, {"$match": {"birth year": {"$gte": 1990}}}, {"$group": {"_id": "$birth year"} } ]) )) df # + df = pd.DataFrame( list( db.trips.aggregate([ {"$project": {"birth year": 1, "_id": 0}}, {"$match": {"birth year": {"$gte": 1990}}}, {"$group": {"_id": "$birth year", "count": {"$sum": 1} } }, {"$sort": {"count": -1}}, {"$limit": 10} ]) )) df
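# As a closing note, the final pipeline above is a pattern worth reusing: project, match, group, sort, limit. The helper below wraps it with the minimum birth year and the number of groups as parameters; the function name and defaults are hypothetical additions, not part of the lab itself.

# +
# Hypothetical wrapper around the last aggregation: trips per birth year
# (at or above min_year), returning the k most frequent birth years.
def top_birth_years(collection, min_year=1990, k=10):
    pipeline = [
        {"$project": {"birth year": 1, "_id": 0}},
        {"$match": {"birth year": {"$gte": min_year}}},
        {"$group": {"_id": "$birth year", "count": {"$sum": 1}}},
        {"$sort": {"count": -1}},
        {"$limit": k},
    ]
    return pd.DataFrame(list(collection.aggregate(pipeline)))

# top_birth_years(db.trips) reproduces the result of the cell above.
# -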
02-Python/07-Python-MongoDB-QueryLab.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Flow diagrams for our EAGE paper # I use `graphviz` to visualize some of the flow diagrams that go into our paper from graphviz import Digraph # + fd = Digraph(name = 'polymer flow diagram', format='svg') fd.graph_attr['rankdir'] = 'LR' fd.node('gas', label='Natural gas') fd.node('raw', label='Raw material') fd.node('polymer', label='Polymer') fd.node('process', label='Polymer\nproduction', shape = 'box') fd.node('pump', label="Pump", shape='Mcircle') fd.node('seawater', label='Seawater') fd.node('watertreatment', label='Water\ntreatment', shape='box') fd.node('powerplant', label='Gas\nturbine', shape='box') fd.node('electricity', label='Electricity', color='red', style='dashed', fontcolor='red') fd.node('water', label='Water', color='blue') fd.node('separator', label='Lift and\nseparation\nprocess') fd.node('oil', label='Oil', shape='cylinder') fd.node('water_treatment', 'Water\ntreatment', shape='box') fd.node('reservoir', label='Reservoir', shape='box3d', style='filled', fillcolor='black', fontcolor='white', color='white') fd.edge('gas', 'process') fd.edge('gas', 'powerplant', style='dashed', color = 'red') fd.edge('raw', 'process') fd.edge('gas', 'process', style='dashed', color = 'red') fd.edge('process', 'polymer') fd.edge('polymer', 'pump') fd.edge('seawater', 'watertreatment') fd.edge('process', 'co2', style='dotted', color='gray') fd.edge('powerplant', 'co2', style='dotted', color='gray') fd.edge('powerplant', 'electricity', style='dashed', color='red') fd.edge('electricity', 'watertreatment', style='dashed', color='red') fd.edge('electricity', 'pump', style='dashed', color='red') fd.edge('watertreatment', 'water') fd.edge('water', 'pump') fd.edge('pump', 'reservoir') fd.edge('reservoir', 'separator') fd.edge('separator', 'oil') fd.edge('separator', 'water_treatment') fd.edge('electricity', 'water_treatment', style='dashed', color='red') fd.edge('electricity', 'separator', style='dashed', color='red') fd.node('co2', label='CO2\nemission', style='filled', fillcolor='gray', fontcolor='white') # fd.edge() fd.render() fd
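# For the version of the figure that actually goes into the manuscript, it can help to render the same graph under an explicit filename and in a vector format. The snippet below is an optional addition (the output filename is a placeholder) and reuses the `fd` object defined above.

# +
# Optional export step (filename is a placeholder): render the diagram as a PDF
# for LaTeX inclusion; cleanup=True removes the intermediate DOT source file.
fd.format = 'pdf'
fd.render(filename='polymer_flow_diagram', cleanup=True)
# -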
analytical/notebooks/.ipynb_checkpoints/Graphs_EAGE_paper-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Lists # # ## Methods to be learned: # # - insert # - extend # - append # - index # - sort # - remove # - reverse # - pop # - count listOfThings = ['Apple','Bag','Laptop'] # ### Insert Method # # The **insert** method takes two parameters: (position, value) and inserts the element at the given index, shifting elements to the right. # listOfThings.insert(3,'Book') listOfThings # ### Extend Method # # The **extend** method takes a list of values to be added to the list. # # ### Important: # The **extend** method accepts any iterable. If a string is given instead of a list, each of its characters is added to the main list as a separate element (note what happens with `extend('Calculator')` below), and a non-iterable value raises a `TypeError`. listOfThings.extend(['Bottle','Mobile']) listOfThings listOfThings.extend('Calculator') listOfThings listOfThings listOfThings.index('Mobile') for i in listOfThings[listOfThings.index('Mobile')+1 : len(listOfThings)]: listOfThings.remove(i) listOfThings # ### Append Method # # The **append** method adds a single element at the end of the list. # ### Important: # # Common error: does not return the new list, just modifies the original. listOfThings.append(['Head Phones','Specs']) listOfThings listOfThings.append('Paper') listOfThings listOfThings.remove( ['Head Phones', 'Specs']) listOfThings # ### Index Method # # The **index** method is used to find the index position of the specified element. # # ### Important: # Throws a `ValueError` if the element is not present in the list. # # Alternative: use the **in** keyword to check for an element in a list. # # For example: 'x' **in** list listOfThings listOfThings.index('Book') listOfThings.index('Watch') 'Bag' in listOfThings 'Watch' in listOfThings # # ## Methods learned: # # - insert # - extend # - append # - index # # # ## Methods to be learned: # # - sort # - remove # - reverse # - pop # - count listOfThings # ### Sort Method # # Sorts the list in place: lexicographically for string lists and in ascending order for integer lists. listOfThings.sort() listOfThings numberList = [3,'akash',5.356, True, False] numberList.sort() numberList # ### Remove Method # # Searches for the **first** instance of the given element and removes it. # # ### Important: # # Throws a `ValueError` if the element is not present. numberList.remove(3) numberList numberList.remove('akash') numberList # ### Reverse Method # # Reverses the order of elements in the list. listOfThings listOfThings.reverse() listOfThings listOfThings.reverse() listOfThings # ### Pop Method # # Removes the element at the specified index from the list and returns it. # # By default, removes the last element of the list. numberList numberList.pop() numberList numberList.pop(0) numberList # ### Count Method # # Returns the frequency of occurrence of an element in the list.
numberList.append(True) numberList.append(4.45) numberList.append(4) numberList numberList.count(True) # ### List Operations # #### Slicing listOfThings listOfThings[4:] listOfThings[:5] listOfThings[::-1] listOfThings[-4:-1] # ### Using Lists as Stacks listOfNums = [] listOfNums.append(3) listOfNums.append(5) listOfNums.append(1) listOfNums.append(7) listOfNums.append(8) listOfNums listOfNums.pop() listOfNums.pop() listOfNums.pop() listOfNums # ### Using Lists as Queues from collections import deque queue = deque(['John','Simon']) ## Initially in the queue queue queue.append('Steve') # Third Person comes in the queue. queue.append('Glenn') # Fourth Person comes in the queue. queue queue.popleft() # John leaves the queue first, since he arrived first. queue.popleft() # Simon leaves the queue next, since he arrived after John queue.appendleft('Sam') # Adding a new person at the start of the queue queue queue.rotate() # Rotates the queue queue queue.clear() # Clears the queue queue
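# ### Rotate with an Argument
#
# One extra detail about `rotate`, illustrated briefly below: it accepts an integer argument `n` and rotates the deque `n` steps to the right; a negative `n` rotates it to the left.

d = deque([1, 2, 3, 4, 5])

d.rotate(2)    # Two steps to the right -> deque([4, 5, 1, 2, 3])
d

d.rotate(-2)   # Two steps back to the left -> deque([1, 2, 3, 4, 5])
d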
Lists.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Developing an AI application # # Going forward, AI algorithms will be incorporated into more and more everyday applications. For example, you might want to include an image classifier in a smart phone app. To do this, you'd use a deep learning model trained on hundreds of thousands of images as part of the overall application architecture. A large part of software development in the future will be using these types of models as common parts of applications. # # In this project, you'll train an image classifier to recognize different species of flowers. You can imagine using something like this in a phone app that tells you the name of the flower your camera is looking at. In practice you'd train this classifier, then export it for use in your application. We'll be using [this dataset](http://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html) of 102 flower categories, you can see a few examples below. # # <img src='assets/Flowers.png' width=500px> # # The project is broken down into multiple steps: # # * Load and preprocess the image dataset # * Train the image classifier on your dataset # * Use the trained classifier to predict image content # # We'll lead you through each part which you'll implement in Python. # # When you've completed this project, you'll have an application that can be trained on any set of labeled images. Here your network will be learning about flowers and end up as a command line application. But, what you do with your new skills depends on your imagination and effort in building a dataset. For example, imagine an app where you take a picture of a car, it tells you what the make and model is, then looks up information about it. Go build your own dataset and make something new. # # First up is importing the packages you'll need. It's good practice to keep all the imports at the beginning of your code. As you work through this notebook and find you need to import a package, make sure to add the import up here. # + # Imports here import numpy as np import torch from torch import nn from torch import optim import torch.nn.functional as F import matplotlib.pyplot as plt from torchvision import datasets, transforms, models from collections import OrderedDict from PIL import Image import os # - # ## Load the data # # Here you'll use `torchvision` to load the data ([documentation](http://pytorch.org/docs/0.3.0/torchvision/index.html)). The data should be included alongside this notebook, otherwise you can [download it here](https://s3.amazonaws.com/content.udacity-data.com/nd089/flower_data.tar.gz). The dataset is split into three parts, training, validation, and testing. For the training, you'll want to apply transformations such as random scaling, cropping, and flipping. This will help the network generalize leading to better performance. You'll also need to make sure the input data is resized to 224x224 pixels as required by the pre-trained networks. # # The validation and testing sets are used to measure the model's performance on data it hasn't seen yet. For this you don't want any scaling or rotation transformations, but you'll need to resize then crop the images to the appropriate size. # # The pre-trained networks you'll use were trained on the ImageNet dataset where each color channel was normalized separately. 
For all three sets you'll need to normalize the means and standard deviations of the images to what the network expects. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`, calculated from the ImageNet images. These values will shift each color channel to be centered at 0 and range from -1 to 1. # data_dir = 'flowers' train_dir = data_dir + '/train' valid_dir = data_dir + '/valid' test_dir = data_dir + '/test' image_path = (test_dir + '/74/' + 'image_01191.jpg') save_dir='checkpoints/' # + if(os.path.exists(save_dir)): print ("Directory for save the model %s already exists" % save_dir) save_dir=save_dir+'checkpoint.pth' else: try: os.mkdir(save_dir) except OSError: print ("Creation of the directory %s failed, we use the root directory" % save_dir) save_dir='checkpoint.pth' else: print ("Successfully created the directory %s for save the model" % save_dir) save_dir=save_dir+'checkpoint.pth' # + # TODO: Define your transforms for the training, validation, and testing sets #data data_transforms = transforms.Compose([transforms.RandomRotation(30),transforms.RandomResizedCrop(224),transforms.RandomHorizontalFlip(),transforms.ToTensor(),transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])]) image_datasets = datasets.ImageFolder(train_dir, transform=data_transforms) image_data = torch.utils.data.DataLoader(image_datasets, batch_size=64, shuffle=True) #test testval_transforms = transforms.Compose([transforms.Resize(256),transforms.CenterCrop(224),transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])]) image_testset = datasets.ImageFolder(test_dir, transform=testval_transforms) image_test = torch.utils.data.DataLoader(image_testset, batch_size=64, shuffle=True) #validation val_transforms = transforms.Compose([transforms.Resize(256),transforms.CenterCrop(224),transforms.ToTensor(),transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])]) image_valset = datasets.ImageFolder(valid_dir, transform=val_transforms) image_val = torch.utils.data.DataLoader(image_valset, batch_size=64, shuffle=True) # - # ### Label mapping # # You'll also need to load in a mapping from category label to category name. You can find this in the file `cat_to_name.json`. It's a JSON object which you can read in with the [`json` module](https://docs.python.org/2/library/json.html). This will give you a dictionary mapping the integer encoded categories to the actual names of the flowers. # + import json with open('cat_to_name.json', 'r') as f: cat_to_name = json.load(f) #print(cat_to_name) # - # # Building and training the classifier # # Now that the data is ready, it's time to build and train the classifier. As usual, you should use one of the pretrained models from `torchvision.models` to get the image features. Build and train a new feed-forward classifier using those features. # # We're going to leave this part up to you. Refer to [the rubric](https://review.udacity.com/#!/rubrics/1663/view) for guidance on successfully completing this section. 
Things you'll need to do: # # * Load a [pre-trained network](http://pytorch.org/docs/master/torchvision/models.html) (If you need a starting point, the VGG networks work great and are straightforward to use) # * Define a new, untrained feed-forward network as a classifier, using ReLU activations and dropout # * Train the classifier layers using backpropagation using the pre-trained network to get the features # * Track the loss and accuracy on the validation set to determine the best hyperparameters # # We've left a cell open for you below, but use as many as you need. Our advice is to break the problem up into smaller parts you can run separately. Check that each part is doing what you expect, then move on to the next. You'll likely find that as you work through each part, you'll need to go back and modify your previous code. This is totally normal! # # When training make sure you're updating only the weights of the feed-forward network. You should be able to get the validation accuracy above 70% if you build everything right. Make sure to try different hyperparameters (learning rate, units in the classifier, epochs, etc) to find the best model. Save those hyperparameters to use as default values in the next part of the project. # # One last important tip if you're using the workspace to run your code: To avoid having your workspace disconnect during the long-running tasks in this notebook, please read in the earlier page in this lesson called Intro to # GPU Workspaces about Keeping Your Session Active. You'll want to include code from the workspace_utils.py module. # # **Note for Workspace users:** If your network is over 1 GB when saved as a checkpoint, there might be issues with saving backups in your workspace. Typically this happens with wide dense layers after the convolutional layers. If your saved checkpoint is larger than 1 GB (you can open a terminal and check with `ls -lh`), you should reduce the size of your hidden layers and train again. 
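# Since only the classifier's weights should change during training, a quick sanity check can confirm that the pre-trained feature parameters stay frozen. The helper below is an optional addition (not required by the rubric); it can be called on the model once it has been defined in the cells that follow.

# +
# Optional sanity check: report how many parameters will actually be updated
# (i.e. have requires_grad=True) versus the total parameter count.
def count_trainable_parameters(model):
    trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    total = sum(p.numel() for p in model.parameters())
    print('{} of {} parameters will be updated during training'.format(trainable, total))

# e.g. count_trainable_parameters(model) after define_model() has been run
# -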
epochs=4 learning_rate=0.001 print_every=10 hidden_sizes = [10200, 1020] # +10% total flowers - +10% --- use 7500 on pc output_size = 102 #total flowers arch='vgg16' #'mobilenet_v2' #vgg16 # + def define_model(): if arch=="vgg16": model = models.vgg16(pretrained=True) input_size = 25088 #32768 else: model = models.mobilenet_v2(pretrained=True) input_size = 1280 #freeze parameters - less memory used for param in model.parameters(): param.requires_grad = False classifier = nn.Sequential(OrderedDict([ ('dropout',nn.Dropout(0.5)), ('fc1', nn.Linear(input_size, hidden_sizes[0])), ('relu1', nn.ReLU()), ('fc2', nn.Linear(hidden_sizes[0], hidden_sizes[1])), ('relu2', nn.ReLU()), ('fc3', nn.Linear(hidden_sizes[1], output_size)), ('output', nn.LogSoftmax(dim=1)) ])) model.classifier=classifier return model model=define_model() #model= model.share_memory() model = model.cuda() # + #no reload criterion = nn.NLLLoss() optimizer = optim.Adam(model.classifier.parameters(), learning_rate) # + #no reload def cal_accuracy(mod, data): loss = 0 accuracy = 0 data_len=len(data) for i, (inputs,labels) in enumerate(data): inputs, labels = inputs.to('cuda') , labels.to('cuda') mod.to('cuda') with torch.no_grad(): outputs = mod.forward(inputs) loss = criterion(outputs,labels) ps = torch.exp(outputs).data equality = (labels.data == ps.max(1)[1]) accuracy += equality.type_as(torch.FloatTensor()).mean() loss = loss / data_len accuracy = accuracy /data_len return loss, accuracy # + #no reload def training(): print('start training') model.to('cuda') #model.share_memory() step=0 for epo in range(epochs): running_loss=0 #take the inputs and labels for the trainload for vai_int,(inputs,labels) in enumerate(image_data): step+=1 inputs, labels = inputs.to('cuda'), labels.to('cuda') optimizer.zero_grad() outputs=model.forward(inputs) loss= criterion(outputs,labels) loss.backward() optimizer.step() #print('still working ') running_loss+=loss.item() if step % print_every == 0: model.eval() #print('end validation model') val_loss,accuracy = cal_accuracy(model, image_val) print("Step nro: {} ".format(step), "Epoch: {}/{} ".format(1+epo, epochs), "Loss: {:.4f} ".format(running_loss), "Validation Loss {:.4f} ".format(val_loss), "Accuracy {:.4f} ".format(accuracy)) running_loss = 0 #torch.cuda.empty_cache() training() print('end training') # - # ## Testing your network # # It's good practice to test your trained network on test data, images the network has never seen either in training or validation. This will give you a good estimate for the model's performance on completely new images. Run the test images through the network and measure the accuracy, the same way you did validation. You should be able to reach around 70% accuracy on the test set if the model has been trained well. # + # TODO: Do validation on the test set #no reload def testing(): correctos = 0 total = 0 model.eval() model.to('cuda') with torch.no_grad(): for inputs, labels in image_test: inputs, labels = inputs.to('cuda'), labels.to('cuda') outputs = model(inputs) aux , prediction = torch.max(outputs.data, 1) total += labels.size(0) tensor= (prediction == labels.data).sum() correctos+=tensor.item() accuracy=100 * correctos / total print('Total: {} - Correct: {} - Accuracy: {:.2f}% '.format(total,correctos,accuracy)) testing() # - # ## Save the checkpoint # # Now that your network is trained, save the model so you can load it later for making predictions. 
You probably want to save other things such as the mapping of classes to indices which you get from one of the image datasets: `image_datasets['train'].class_to_idx`. You can attach this to the model as an attribute which makes inference easier later on. # # ```model.class_to_idx = image_datasets['train'].class_to_idx``` # # Remember that you'll want to completely rebuild the model later so you can use it for inference. Make sure to include any information you need in the checkpoint. If you want to load the model and keep training, you'll want to save the number of epochs as well as the optimizer state, `optimizer.state_dict`. You'll likely want to use this trained model in the next part of the project, so best to save it now. # + # TODO: Save the checkpoint #no reload model.class_to_idx = image_datasets.class_to_idx model_state={ 'learning_rate':learning_rate, 'epochs':epochs, 'hidden_sizes':hidden_sizes, 'output_size':output_size, 'state_dict':model.state_dict(), 'class_to_idx':model.class_to_idx, 'arch':arch } torch.save(model_state, save_dir) # - # ## Loading the checkpoint # # At this point it's good to write a function that can load a checkpoint and rebuild the model. That way you can come back to this project and keep working on it without having to retrain the network. # + # TODO: Write a function that loads a checkpoint and rebuilds the model state_model = torch.load(save_dir) learning_rate=state_model['learning_rate'] epochs=state_model['epochs'] hidden_sizes=state_model['hidden_sizes'] output_size=state_model['output_size'] arch=state_model['arch'] model=define_model() model = model.cuda() model.class_to_idx=state_model['class_to_idx'] model.load_state_dict(state_model['state_dict']) print('model load') # - # # Inference for classification # # Now you'll write a function to use a trained network for inference. That is, you'll pass an image into the network and predict the class of the flower in the image. Write a function called `predict` that takes an image and a model, then returns the top $K$ most likely classes along with the probabilities. It should look like # # ```python # probs, classes = predict(image_path, model) # print(probs) # print(classes) # > [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339] # > ['70', '3', '45', '62', '55'] # ``` # # First you'll need to handle processing the input image such that it can be used in your network. # # ## Image Preprocessing # # You'll want to use `PIL` to load the image ([documentation](https://pillow.readthedocs.io/en/latest/reference/Image.html)). It's best to write a function that preprocesses the image so it can be used as input for the model. This function should process the images in the same manner used for training. # # First, resize the images where the shortest side is 256 pixels, keeping the aspect ratio. This can be done with the [`thumbnail`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.thumbnail) or [`resize`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.thumbnail) methods. Then you'll need to crop out the center 224x224 portion of the image. # # Color channels of images are typically encoded as integers 0-255, but the model expected floats 0-1. You'll need to convert the values. It's easiest with a Numpy array, which you can get from a PIL image like so `np_image = np.array(pil_image)`. # # As before, the network expects the images to be normalized in a specific way. 
For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`. You'll want to subtract the means from each color channel, then divide by the standard deviation. # # And finally, PyTorch expects the color channel to be the first dimension but it's the third dimension in the PIL image and Numpy array. You can reorder dimensions using [`ndarray.transpose`](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ndarray.transpose.html). The color channel needs to be first and retain the order of the other two dimensions. # + def process_image(image): ''' Scales, crops, and normalizes a PIL image for a PyTorch model, returns an Numpy array ''' img = Image.open(image) image_transforms = transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) img = image_transforms(img) return img # show a image processed_image = process_image(image_path) print('image processed') # - # To check your work, the function below converts a PyTorch tensor and displays it in the notebook. If your `process_image` function works, running the output through this function should return the original image (except for the cropped out portions). # + def imshow(image, ax=None, title=None): if ax is None: fig, ax = plt.subplots() # PyTorch tensors assume the color channel is the first dimension # but matplotlib assumes is the third dimension image = image.numpy().transpose((1, 2, 0)) # Undo preprocessing mean = np.array([0.485, 0.456, 0.406]) std = np.array([0.229, 0.224, 0.225]) image = std * image + mean # Image needs to be clipped between 0 and 1 or it looks like noise when displayed image = np.clip(image, 0, 1) ax.imshow(image) return ax imshow(processed_image) # - # ## Class Prediction # # Once you can get images in the correct format, it's time to write a function for making predictions with your model. A common practice is to predict the top 5 or so (usually called top-$K$) most probable classes. You'll want to calculate the class probabilities then find the $K$ largest values. # # To get the top $K$ largest values in a tensor use [`x.topk(k)`](http://pytorch.org/docs/master/torch.html#torch.topk). This method returns both the highest `k` probabilities and the indices of those probabilities corresponding to the classes. You need to convert from these indices to the actual class labels using `class_to_idx` which hopefully you added to the model or from an `ImageFolder` you used to load the data ([see here](#Save-the-checkpoint)). Make sure to invert the dictionary so you get a mapping from index to class as well. # # Again, this method should take a path to an image and a model checkpoint, then return the probabilities and classes. # # ```python # probs, classes = predict(image_path, model) # print(probs) # print(classes) # > [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339] # > ['70', '3', '45', '62', '55'] # ``` # + def predict(top_k=5): ''' Predict the class (or classes) of an image using a trained deep learning model. 
''' # TODO: Implement the code to predict the class from an image file model.eval() model.cpu() img = process_image(image_path) img = img.unsqueeze_(0) img = img.float() with torch.no_grad(): output = model.forward(img) probs, classes = torch.topk(output,top_k) probs = probs.exp() idx_to_class = {val: key for key, val in model.class_to_idx.items()} top_n = [idx_to_class[each] for each in classes.cpu().numpy()[0]] return probs, top_n probs, classes = predict() labels = [] for index in classes: labels.append(cat_to_name[str(index)]) print('Name of the given image: ', labels[0]) probs=probs[0] for name, prob in zip(labels, probs): print("Name of class and probability {}: {:6f}".format(name, prob)) print(probs) print(classes) # - # ## Sanity Checking # # Now that you can use a trained model for predictions, check to make sure it makes sense. Even if the testing accuracy is high, it's always good to check that there aren't obvious bugs. Use `matplotlib` to plot the probabilities for the top 5 classes as a bar graph, along with the input image. It should look like this: # # <img src='assets/inference_example.png' width=300px> # # You can convert from the class integer encoding to actual flower names with the `cat_to_name.json` file (should have been loaded earlier in the notebook). To show a PyTorch tensor as an image, use the `imshow` function defined above. # + # TODO: Display an image along with the top 5 classes def sanity_checking(): plt.rcParams["figure.figsize"] = (3,3) plt.rcParams.update({'font.size': 12}) # Showing actual image #image_path = (test_dir + '/37/' + 'image_03783.jpg') probs, classes = predict() image_to_show = process_image(image_path) image = imshow(image_to_show, ax = plt) image.axis('off') image.title(cat_to_name[str(classes[0])]) image.show() # Showing Top Classes labels = [] for class_index in classes: labels.append(cat_to_name[str(class_index)]) y_pos = np.arange(len(labels)) probs = probs[0] plt.barh(y_pos, probs, align='center', color='red') plt.yticks(y_pos, labels) plt.xlabel('Probability') plt.title('Top Classes') plt.show() sanity_checking() # -
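# The `predict()` helper above relies on the module-level `model` and `image_path`. For the command-line application in the next part of the project, a version that takes the image path, the model, and `top_k` as explicit arguments is easier to reuse and test. Below is a minimal sketch of such a wrapper (same logic as `predict()`, just parameterised; the name `predict_from_path` is only illustrative).

# +
def predict_from_path(path, trained_model, top_k=5):
    """Return (probabilities, class labels) for the top_k most likely classes of one image."""
    trained_model.eval()
    trained_model.cpu()
    img = process_image(path).unsqueeze(0).float()
    with torch.no_grad():
        log_ps = trained_model(img)
    top_p, top_idx = torch.exp(log_ps).topk(top_k, dim=1)
    idx_to_class = {val: key for key, val in trained_model.class_to_idx.items()}
    top_classes = [idx_to_class[i] for i in top_idx[0].tolist()]
    return top_p[0].tolist(), top_classes


# Same call as before, but nothing inside the function depends on globals.
example_probs, example_classes = predict_from_path(image_path, model, top_k=5)
print([cat_to_name[c] for c in example_classes])
print(example_probs)
# -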
Image Classifier Project.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="_CMW13uMBlpk" # # はじめに # # Chainer チュートリアルへようこそ。 # # このチュートリアルは、機械学習やディープラーニングの仕組みや使い方を理解したい**大学学部生**以上の方に向けて書かれたオンライン学習資料です。 # # 機械学習の勉強を進めるために必要な数学の知識から、Python というプログラミング言語を用いたコーディングの基本、機械学習・ディープラーニングの基礎的な理論、画像認識や自然言語処理などに機械学習を応用する方法に至るまで、幅広いトピックを解説しています。 # # 機械学習を学び始めようとすると、ある程度、線形代数や確率統計といった数学の知識から、何らかのプログラミング言語が使えることなどが必要となってきます。 # しかし、そういった数学やプログラミングの全てに精通していなければ機械学習について学び始められないかというと、必ずしもそうではありません。 # # 本チュートリアルでは、機械学習やディープラーニングに興味を持った方が、まず必要になる最低限の数学とプログラミングの知識から学び始められるように、資料を充実させています。 # # そのため、できる限りこのサイト以外の教科書や資料を探さなくても、**このサイトだけで機械学習・ディープラーニングに入門できる**ことを目指して、作られています。初学者の方が「何から学び始めればいいのか」と迷うことなく学習を始められることを目指したサイトです。 # # また、本チュートリアルの特徴として、資料の中に登場するコードが、Google Colaboratory というサービスを利用することで**そのままブラウザ上で実行できるようになっている**という点があります。 # # ブラウザだけでコードを書き、実行して、結果を確認することができれば、説明に使われたサンプルコードを実行して結果を確かめるために、手元のコンピュータで環境構築を行う必要がなくなります。 # # 本章ではまず、この **Google Colaboratory** というサービスの利用方法を説明します。 # + [markdown] colab_type="text" id="TK3cXCQuBlpm" # ## 必要なもの # # - Google アカウント(お持ちでない場合は、こちらからお作りください:[Google アカウントの作成](https://accounts.google.com/signup)) # - ウェブブラウザ( Google Colaboratory はほとんどの主要なブラウザで動作します。PC 版の Chrome と Firefox では動作が検証されています。) # + [markdown] colab_type="text" id="D3QflLv0qdiy" # ## Google Colaboratory の基本 # # Google Colaboratory(以下 Colab )は、クラウド上で [Jupyter Notebook](https://jupyter.org/) 環境を提供する Google のウェブサービスです。Jupyter Notebook はブラウザ上で主に以下のようなことが可能なオープンソースのウェブアプリケーションであり、データ分析の現場や研究、教育などで広く用いられています。 # # - プログラムを実行と、その結果の確認 # - Markdown と呼ばれる文章を記述するためのマークアップ言語を使った、メモや解説などの記述の追加 # # Colab では無料で GPU も使用することができますが、そのランタイムは**最大 12 時間**で消えてしまうため、長時間を要する処理などは別途環境を用意する必要があります。 # 学びはじめのうちは、数分から数時間程度で終わる処理がほとんどであるため、気にする必要はありませんが、本格的に使っていく場合は有料のクラウドサービスを利用するなどして、環境を整えるようにしましょう。 # # 以降では、その基本的な使い方を説明します。 # + [markdown] colab_type="text" id="SNVvaFdwBlpn" # ### Colab を開く # # まずは以下のURLにアクセスして、ブラウザで Colab を開いてください。 # # [https://colab.research.google.com/](https://colab.research.google.com/) # # 「Colaboratory へようこそ」というタイトルの Jupyter Notebook が表示されます。 # # 次に、タイトルの下にある 「ファイル」 から、「Python 3 の新しいノートブック」 を選択し、まっさらな Jupyter Notebook を作成しましょう。 # # ![create new notebook](images/01/01_create_new_notebook.png) # # Google アカウントにまだログインしていなかった場合は、以下のようなメッセージが表示されます。 # # ![please login](images/01/01_02.png) # # その場合は、「ログイン」 をクリックして、Google アカウントでログインしてください。 # # ログインが完了すると、以下のような画面が表示され、準備完了です。 # もうすでに Python を使ったプログラミングを開始する準備が整っています。 # # ![new python3 notebook](images/01/01_03.png) # + [markdown] colab_type="text" id="SWEJAKWMBlpo" # ### Open in Colab ボタン # # このチュートリアルの一部の章には、`Open in Colab` と書かれた以下のようなボタンがページ上部に設置されています。 # # [![open in colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/kandalva/tutorials/blob/master/ja/01_Welcome_to_Chainer_Tutorial_ja.ipynb) # # このボタンを押すと、ブラウザで見ている資料が、Colab 上で Jupyter Notebook として開かれます。 # すると、チュートリアルの中で説明に用いられているコードを、**実際に実行して結果を確認することができます。** # # それでは、早速上のボタンか、このページの上部に配置されている `Open in Colab` ボタンを押して、このページを Colab で開いてください。 # すると、`Playground モード` という編集不可な状態でノートブックが Colab 上で開かれます。 # そこで、下図の位置にある `ドライブにコピー` というボタンを押して、自分の Google Drive 上にこのノートブックをコピーしてください。 # このボタンを押すと、コピーされたノートブックが自動的に開き、以降は内容に編集を加えたり、コードを実行したりすることができます。 # # ![copy to mydrive](images/01/01_04.png) # # この # # 1. `Open in Colab` から Colab へ移動 # 2. 自分のドライブへノートブックをコピーする # 3. 
コードを実行しながら解説を読んでいく # # という手順が、本チュートリアルサイトのおすすめの利用方法です。 # + [markdown] colab_type="text" id="k4ak2UP9Blpp" # ## Colab の基本的な使い方 # # Colab 上の Jupyter Notebook を以降、単に**ノートブック**と呼びます。 # # ノートブックは、複数の**セル**と呼ばれるブロックを持つことができます。 # 新しいノートブックを作った直後では、何も書かれていないセルが一つだけ存在している状態になっています。 # セルの内側のどこかをクリックすると、そのセルを選択することができます。 # # セルには、**コードセル**と**テキストセル**の 2 種類があります。 # **コードセル** は Python のコードを書き込み、実行するためのセルであり、**テキストセル**は、Markdown 形式で文章を書くためのセルです。 # # それぞれのセルタイプについてもう少し詳しく説明をします。 # + [markdown] colab_type="text" id="9hYkVljaBlpq" # ### コードセル # # コードセルは、Python のコードを書き込み、実行することができるセルです。 # 実行するには、コードセルを選択した状態で、`Ctrl + Enter` または `Shift + Enter` を押します。 # 試しに、下のセルを選択して、`Ctrl + Enter` を押してみてください。 # + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="EaOJalpbBlpr" outputId="40291477-aa22-4151-da78-d2ae6d2eb627" print('Hello world!') # + [markdown] colab_type="text" id="8QsnHDylXQrb" # すぐ下に、Hello world! という文字列が表示されました。 # 上のセルに書き込まれているのは Python のコードで、与えられた文字列を表示する関数である `print()` に、`'Hello world!'` という文字列を渡しています。 # これを今実行したため、その結果が下に表示されています。 # # プログラミング言語の Python については、[次の章](https://tutorials.chainer.org/ja/02_Basics_of_Python.html) でより詳しく解説します。 # + [markdown] colab_type="text" id="f7vtQ2SmBlpx" # ### テキストセル # # テキストセルでは、Markdown 形式で記述された文章を扱います。 # 試しに、このセルを**ダブルクリック**してみてください。 # テキストセルが編集モードになり、Markdown 形式で文章を装飾するための、先程までは表示されていなかった記号が見えるようになります。 # # その状態で `Shift + Enter` を押してみましょう。 # # もとのレンダリングされた文章の表示に戻ります。 # + [markdown] colab_type="text" id="wEwqOW9bBlpy" # ### Colab から Google Drive を使う # # Google Drive というオンラインストレージサービスを Colab で開いたノートブックから利用することができます。 # ノートブック中でコードを実行して作成したファイルなどを保存したり、逆に Google Drive 上に保存されているデータを読み込んだりすることができます。 # # Colab 上のノートブックから Google Drive を使うには、Colab 専用のツールを使って、`/content/drive` というパスに現在ログイン中の Google アカウントが持っている Google Drive のスペースをマウントします。 # + colab={} colab_type="code" id="TI3-V_gN3Ekr" from google.colab import drive drive.mount('/content/drive') # + [markdown] colab_type="text" id="zllU5vanBlp2" # このノートブックを Colab で開いてから初めて上のコードセルを実行した場合は、以下のようなメッセージが表示されます。 # # ![please authorize](images/01/01_05.png) # # 指示に従って表示されているURLへアクセスしてください。 # すると、「アカウントの選択」と書かれたページに飛び、すでにログイン済みの場合はログイン中の Google アカウントのアイコンやメールアドレスが表示されています。 # 利用したいアカウントをクリックして、次に進んで下さい。 # すると次に、`Google Drive File Stream が Google アカウントへのアクセスをリクエストしています` と書かれたページに飛びます。 # # ![access request](images/01/01_06.png) # # 右下に「許可」と書かれたボタンが見えます。 # こちらをクリックしてください。 # すると以下のように認証コードが記載されたページへ移動します。 # # ![access code](images/01/01_07.png) # # (この画像では認証コード部分をぼかしています) # このコードを選択してコピーするか、右側にあるアイコンをクリックしてコピーしてください。 # # 元のノートブックへ戻り、`Enter your authorization code:` というメッセージの下にある空欄に、先程コピーした認証コードを貼り付けて、Enter キーを押してください。 # # **Mounted at /content/drive** と表示されたら、準備は完了です。 # # 以下のセルを実行して、自分の Google Drive が Colab からアクセス可能になっていることを確認してください。 # + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="osShvuIQ3GFy" outputId="e244c570-af51-4af8-a9f1-5a68da0fa41b" # 'My Drive'の表記が出ていればマウントがうまく行われています。 # !ls 'drive/' # + [markdown] colab_type="text" id="DbvFPwpova8M" # 上のセルで実行しているのは Python のコードではありません。 # Jupyter Notebook では、コードセル中で `!` が先頭に付いている行は特別に解釈されます。`!ls` は、次に続くディレクトリの中にあるファイルまたはディレクトリの一覧を表示せよ、という意味です([注釈1](#note1))。 # + [markdown] colab_type="text" id="jZNTuBQ54BSu" # ### Colab の便利なショートカット # # Colab を使用中に、セルのタイプの変更やセルの複製・追加などの操作をする場合は、メニューから該当する項目を選ぶ方法以外に、キーボードショートカットを利用する方法もあります。 # # 下記によく使う**ショートカットキー**をまとめておきます。 # 多くのショートカットキーは**二段階**になっており、まず `Ctrl + M` を押してから、それぞれの機能によって異なるコマンドを入力する形になっています。 # # | 説明 | コマンド | # | -------------------- | ------------- | # | Markdownモードへ変更 | Ctrl + M → M 
| # | Codeモードへ変更 | Ctrl + M → Y | # | セルの実行 | Shift + Enter | # | セルを上に追加 | Ctrl + M → A | # | セルを下に追加 | Ctrl + M → B | # | セルのコピー | Ctrl + M → C | # | セルの貼り付け | Ctrl + M → V | # | セルの消去 | Ctrl + M → D | # | コメントアウト | Ctrl + / | # # コメントアウトとは、コード中で実行時に無視したい行やコメントを選択した状態で行う操作です。 # Python では、`#` の後に続く文字列は全て、コメントとして無視され、実行時に評価されることはありません。 # + [markdown] colab_type="text" id="44vOyaBKEk3m" # ### GPU を使用する # # Colab では GPU を無料で使用することができます。 # 初期設定では GPU を使用しない設定となっているため、GPU を使用する場合は設定を変更する必要があります。 # # GPU を使用する場合は、画面上部のタブの中の 「Runtime」 (または「ランタイム」) をクリックし、「Change runtime type」 (または「ランタイムのタイプを変更」)を選択します。 # # そして、下記の画像の様に 「Hardware accelerator」 (または「ハードウェアアクセラレータ」)を GPU に変更します。 # # ![GPUの設定](images/01/01_08.png) # # これで Colab 上で GPU を使用できるようになりました。 # # # + [markdown] colab_type="text" id="QoQHVO6rva8O" # これで、チュートリアルの本編に入っていく準備が完了しました。次の章では、Python というプログラミング言語の基本について解説します。 # + [markdown] colab_type="text" id="rot1jrxLy47Y" # <hr /> # <div class="alert alert-info"> # **注釈 1** # # `ls` はシェルコマンドの 1 つです。 # # [▲上へ戻る](#ref_note1) # </div> #
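# + [markdown]
# As an optional check, you can confirm that a GPU is actually attached to the runtime by querying the NVIDIA driver from a code cell; if no GPU is attached, the command prints an error message instead of a device table.

# +
# Show the GPU attached to this Colab runtime (requires the GPU hardware accelerator to be enabled).
# !nvidia-smi
# -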
ja/01_Welcome_to_Chainer_Tutorial.ipynb
# --- # jupyter: # jupytext: # formats: ipynb,.pct.py:percent # text_representation: # extension: .py # format_name: percent # format_version: '1.3' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %% [markdown] # # Manipulating GPflow models # # One of the key ingredients in GPflow is the model class, which enables you to carefully control parameters. This notebook shows how some of these parameter control features work, and how to build your own model with GPflow. First we'll look at: # # - how to view models and parameters # - how to set parameter values # - how to constrain parameters (for example, variance > 0) # - how to fix model parameters # - how to apply priors to parameters # - how to optimize models # # Then we'll show how to build a simple logistic regression model, demonstrating the ease of the parameter framework. # # GPy users should feel right at home, but there are some small differences. # # First, let's deal with the usual notebook boilerplate and make a simple GP regression model. See [Basic (Gaussian likelihood) GP regression model](../basics/regression.ipynb) for specifics of the model; we just want some parameters to play with. # %% import numpy as np import gpflow import tensorflow_probability as tfp from gpflow.utilities import print_summary, set_trainable, to_default_float # %% [markdown] # We begin by creating a very simple GP regression model: # %% # generate toy data np.random.seed(1) X = np.random.rand(20, 1) Y = np.sin(12 * X) + 0.66 * np.cos(25 * X) + np.random.randn(20, 1) * 0.01 m = gpflow.models.GPR((X, Y), kernel=gpflow.kernels.Matern32() + gpflow.kernels.Linear()) # %% [markdown] # ## Viewing, getting, and setting parameters # You can display the state of the model in a terminal by using `print_summary(m)`. You can change the display format using the `fmt` keyword argument, e.g. `'html'`. In a notebook, you can also use `fmt='notebook'` or set the default printing format as `notebook`: # %% print_summary(m, fmt="notebook") # %% gpflow.config.set_default_summary_fmt("notebook") # %% [markdown] # This model has four parameters. The kernel is made of the sum of two parts. The first (counting from zero) is a Matern32 kernel that has a variance parameter and a lengthscales parameter; the second is a linear kernel that has only a variance parameter. There is also a parameter that controls the variance of the noise, as part of the likelihood. # # All the model variables have been initialized at `1.0`. You can access individual parameters in the same way that you display the state of the model in a terminal; for example, to see all the parameters that are part of the likelihood, run: # %% print_summary(m.likelihood) # %% [markdown] # This gets more useful with more complex models! # %% [markdown] # To set the value of a parameter, just use `assign()`: # %% m.kernel.kernels[0].lengthscales.assign(0.5) m.likelihood.variance.assign(0.01) print_summary(m, fmt="notebook") # %% [markdown] # ## Constraints and trainable variables # # GPflow helpfully creates an unconstrained representation of all the variables. In the previous example, all the variables are constrained positively (see the **transform** column in the table); the unconstrained representation is given by $\alpha = \log(\exp(\theta)-1)$. 
The `trainable_parameters` property returns the constrained values: # %% m.trainable_parameters # %% [markdown] # Each parameter has an `unconstrained_variable` attribute that enables you to access the unconstrained value as a TensorFlow `Variable`. # %% p = m.kernel.kernels[0].lengthscales p.unconstrained_variable # %% [markdown] # You can also check the unconstrained value as follows: # %% p.transform.inverse(p) # %% [markdown] # Constraints are handled by the Bijector classes from the `tensorflow_probability` package. You might prefer to use the constraint $\alpha = \log(\theta)$; this is easily done by replacing the parameter with one that has a different `transform` attribute (here we make sure to copy all other attributes across from the old parameter; this is not necessary when there is no `prior` and the `trainable` state is still the default of `True`): # %% old_parameter = m.kernel.kernels[0].lengthscales new_parameter = gpflow.Parameter( old_parameter, trainable=old_parameter.trainable, prior=old_parameter.prior, name=old_parameter.name.split(":")[0], # tensorflow is weird and adds ':0' to the name transform=tfp.bijectors.Exp(), ) m.kernel.kernels[0].lengthscales = new_parameter # %% [markdown] # Though the lengthscale itself remains the same, the unconstrained lengthscale has changed: # %% p.transform.inverse(p) # %% [markdown] # You can also change the `transform` attribute in place: # %% m.kernel.kernels[0].variance.transform = tfp.bijectors.Exp() # %% print_summary(m, fmt="notebook") # %% [markdown] # ## Changing whether a parameter will be trained in optimization # # Another helpful feature is the ability to fix parameters. To do this, simply set the `trainable` attribute to `False`; this is shown in the **trainable** column of the representation, and the corresponding variable is removed from the free state. # %% set_trainable(m.kernel.kernels[1].variance, False) print_summary(m) # %% m.trainable_parameters # %% [markdown] # To unfix a parameter, just set the `trainable` attribute to `True` again. # %% set_trainable(m.kernel.kernels[1].variance, True) print_summary(m) # %% [markdown] # **NOTE:** If you want to recursively change the `trainable` status of an object that *contains* parameters, you **must** use the `set_trainable()` utility function. # # A module (e.g. a model, kernel, likelihood, ... instance) does not have a `trainable` attribute: # %% try: m.kernel.trainable except AttributeError: print(f"{m.kernel.__class__.__name__} does not have a trainable attribute") # %% set_trainable(m.kernel, False) print_summary(m) # %% [markdown] # ## Priors # # You can set priors in the same way as transforms and trainability, by using `tensorflow_probability` distribution objects. Let's set a Gamma prior on the variance of the Matern32 kernel. # %% k = gpflow.kernels.Matern32() k.variance.prior = tfp.distributions.Gamma(to_default_float(2), to_default_float(3)) print_summary(k) # %% m.kernel.kernels[0].variance.prior = tfp.distributions.Gamma( to_default_float(2), to_default_float(3) ) print_summary(m) # %% [markdown] # ## Optimization # # To optimize your model, first create an instance of an optimizer (in this case, `gpflow.optimizers.Scipy`), which has optional arguments that are passed to `scipy.optimize.minimize` (we minimize the negative log likelihood). Then, call the `minimize` method of that optimizer, with your model as the optimization target. 
Variables that have priors are maximum a priori (MAP) estimated, that is, we add the log prior to the log likelihood, and otherwise use Maximum Likelihood. # %% opt = gpflow.optimizers.Scipy() opt.minimize(m.training_loss, variables=m.trainable_variables) # %% [markdown] # ## Building new models # # To build new models, you'll need to inherit from `gpflow.models.BayesianModel`. # Parameters are instantiated with `gpflow.Parameter`. # You might also be interested in `gpflow.Module` (a subclass of `tf.Module`), which acts as a 'container' for `Parameter`s (for example, kernels are `gpflow.Module`s). # # In this very simple demo, we'll implement linear multiclass classification. # # There are two parameters: a weight matrix and a bias (offset). You can use # Parameter objects directly, like any TensorFlow tensor. # # The training objective depends on the type of model; it may be possible to # implement the exact (log)marginal likelihood, or only a lower bound to the # log marginal likelihood (ELBO). You need to implement this as the # `maximum_log_likelihood_objective` method. The `BayesianModel` parent class # provides a `log_posterior_density` method that returns the # `maximum_log_likelihood_objective` plus the sum of the log-density of any priors # on hyperparameters, which can be used for MCMC. # GPflow provides mixin classes that define a `training_loss` method # that returns the negative of (maximum likelihood objective + log prior # density) for MLE/MAP estimation to be passed to optimizer's `minimize` # method. Models that derive from `InternalDataTrainingLossMixin` are expected to store the data internally, and their `training_loss` does not take any arguments and can be passed directly to `minimize`. # Models that take data as an argument to their `maximum_log_likelihood_objective` method derive from `ExternalDataTrainingLossMixin`, which provides a `training_loss_closure` to take the data and return the appropriate closure for `optimizer.minimize`. # This is also discussed in the [GPflow with TensorFlow 2 notebook](../intro_to_gpflow2.ipynb). # %% import tensorflow as tf class LinearMulticlass(gpflow.models.BayesianModel, gpflow.models.InternalDataTrainingLossMixin): # The InternalDataTrainingLossMixin provides the training_loss method. # (There is also an ExternalDataTrainingLossMixin for models that do not encapsulate data.) def __init__(self, X, Y, name=None): super().__init__(name=name) # always call the parent constructor self.X = X.copy() # X is a NumPy array of inputs self.Y = Y.copy() # Y is a 1-of-k (one-hot) representation of the labels self.num_data, self.input_dim = X.shape _, self.num_classes = Y.shape # make some parameters self.W = gpflow.Parameter(np.random.randn(self.input_dim, self.num_classes)) self.b = gpflow.Parameter(np.random.randn(self.num_classes)) # ^^ You must make the parameters attributes of the class for # them to be picked up by the model. i.e. this won't work: # # W = gpflow.Parameter(... <-- must be self.W def maximum_log_likelihood_objective(self): p = tf.nn.softmax( tf.matmul(self.X, self.W) + self.b ) # Parameters can be used like a tf.Tensor return tf.reduce_sum(tf.math.log(p) * self.Y) # be sure to return a scalar # %% [markdown] # ...and that's it. Let's build a really simple demo to show that it works. 
# %% np.random.seed(123) X = np.vstack( [ np.random.randn(10, 2) + [2, 2], np.random.randn(10, 2) + [-2, 2], np.random.randn(10, 2) + [2, -2], ] ) Y = np.repeat(np.eye(3), 10, 0) import matplotlib.pyplot as plt plt.style.use("ggplot") # %matplotlib inline plt.rcParams["figure.figsize"] = (12, 6) _ = plt.scatter(X[:, 0], X[:, 1], 100, np.argmax(Y, 1), lw=2, cmap=plt.cm.viridis) # %% m = LinearMulticlass(X, Y) m # %% opt = gpflow.optimizers.Scipy() opt.minimize(m.training_loss, variables=m.trainable_variables) # %% xx, yy = np.mgrid[-4:4:200j, -4:4:200j] X_test = np.vstack([xx.flatten(), yy.flatten()]).T f_test = np.dot(X_test, m.W.read_value()) + m.b.read_value() p_test = np.exp(f_test) p_test /= p_test.sum(1)[:, None] # %% plt.figure(figsize=(12, 6)) for i in range(3): plt.contour(xx, yy, p_test[:, i].reshape(200, 200), [0.5], colors="k", linewidths=1) _ = plt.scatter(X[:, 0], X[:, 1], 100, np.argmax(Y, 1), lw=2, cmap=plt.cm.viridis) # %% [markdown] # That concludes the new model example and this notebook. You might want to see for yourself that the `LinearMulticlass` model and its parameters have all the functionality demonstrated here. You could also add some priors and run Hamiltonian Monte Carlo using the HMC optimizer `gpflow.train.HMC` and its `sample` method. See [Markov Chain Monte Carlo (MCMC)](../advanced/mcmc.ipynb) for more information on running the sampler.
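# %% [markdown]
# The same model can also be written against `ExternalDataTrainingLossMixin`, in which case the data is not stored on the model but passed in at optimisation time through `training_loss_closure`. Below is a minimal sketch reusing the `X`, `Y` arrays from the demo; the class name `LinearMulticlassExternal` is only illustrative.

# %%
class LinearMulticlassExternal(
    gpflow.models.BayesianModel, gpflow.models.ExternalDataTrainingLossMixin
):
    """Same linear multiclass model, but the data is supplied externally."""

    def __init__(self, input_dim, num_classes, name=None):
        super().__init__(name=name)
        self.W = gpflow.Parameter(np.random.randn(input_dim, num_classes))
        self.b = gpflow.Parameter(np.random.randn(num_classes))

    def maximum_log_likelihood_objective(self, data):
        X_in, Y_in = data
        p = tf.nn.softmax(tf.matmul(X_in, self.W) + self.b)
        return tf.reduce_sum(tf.math.log(p) * Y_in)


m_ext = LinearMulticlassExternal(input_dim=2, num_classes=3)
opt = gpflow.optimizers.Scipy()
# training_loss_closure wraps the (X, Y) tuple into a closure suitable for the optimizer.
opt.minimize(m_ext.training_loss_closure((X, Y)), variables=m_ext.trainable_variables)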
doc/source/notebooks/understanding/models.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # + slideshow={"slide_type": "-"} """This area sets up the Jupyter environment. Please do not modify anything in this cell. """ import os import sys # Add project to PYTHONPATH for future use sys.path.insert(1, os.path.join(sys.path[0], '..')) # Import miscellaneous modules from IPython.core.display import display, HTML # Set CSS styling with open('../admin/custom.css', 'r') as f: style = """<style>\n{}\n</style>""".format(f.read()) display(HTML(style)) # + [markdown] slideshow={"slide_type": "slide"} # # Outline # # <div class="alert alert-warning"> # The following notebook will go through the basics of **supervised learning**. # </div> # # In supervised learning we assume that our data consist of **input - output** pairs. A learning algorithm analyses the data and produces a function, or model, we can use to infer *outputs* given unseen future *inputs*. # # Below we can see a simplified illustration of the supervised learning problem. # # Pairs of inputs $\mathbf{x}$ and outputs $y$ constitutes our training examples, where the inputs are sampled from a probability distribution. A pair $(\mathbf{x}, y)$ is related by an *unknown* target function $f$ governed by a conditional probability distribution. The ultimate goal of supervised learning is to learn a function $g$ which approximates $f$ well. # # The particular approximation $g$ we pick is called a hypothesis. A learning algorithm is responsible for picking the most appropriate hypothesis from a hypothesis set. The decision between which hypothesis to pick is done by looking at the *data* and typically involves an error function which measures how good a hypothesis may be. # # <img src="resources/supervised-learning.png" alt="Supervised Learning" width="700" /> # # When our learning algorithm has picked a good hypothesis, we can feed it new and unseen samples to produce output estimates. # # The name of the data typically differ depending on which area you are from. # # The **input** variables are commonly known as: # # - covariates # - predictors # - features # # The **output** variables are commonly known as: # # - variates # - targets # - labels # - # ## Linear Models: Regression # # For now we will focus on one of the simplest supervised learning problems: *linear regression*. # # A linear regression model learns a real-valued function where one or more dependent output variable(s) *depend* linearly on one or more independent input variable(s). Geometrically, this real-valued function can be interpreted as a hyperplane which we attempt to fit to our data. 
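# To make this concrete before moving on, here is a tiny standalone sketch of what such a model computes: with an intercept $b$ and slope $w$ (values chosen arbitrarily here), a prediction is simply $b + wx$ for each input.

# +
# Minimal standalone illustration of a linear model prediction (arbitrary b and w).
import numpy as np

b, w = 1.5, 0.8                      # intercept and slope
x = np.array([0.0, 1.0, 2.0, 3.0])   # some example inputs
y_hat = b + w * x                    # the model's predictions
print(y_hat)                         # [1.5 2.3 3.1 3.9]
# -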
#
#
# ### Motivation
#
# * Allows us to investigate the relationship between two or more variables statistically
# * Can be thought of as a building block of artificial neural networks
# * A solution can be found analytically or using data-driven optimisation
# * Basic introduction to supervised learning
# * Introduces you to the Python programming language and Jupyter notebook usage
#
# <img src="https://imgs.xkcd.com/comics/machine_learning.png" alt="xkcd" width="300" />

# ## Notation
#
# This notebook will use the following notation:
#
# * A (training) dataset has $N$ input - output pairs: $(\mathbf{x}_i, y_i)$, where $i$ signifies the $i$th example
# * Each input $\mathbf{x}_i$ is a $d$ dimensional column vector: $\mathbf{x}_i \in \mathbb{R}^d$
# * For this notebook we will assume the output to be univariate: $y \in \mathbb{R}$
#
# Keep in mind that additional notation will be introduced as we continue through the notebooks.

# # Example: Income vs. Education
#
# In the following example we will load data from a CSV file and use it to estimate a linear model between an `Education index` and an `Income index`.
#
# * **input** $\rightarrow$ Scalar metric indicating level of education
# * **output** $\rightarrow$ Scalar metric indicating level of income
#
# <div class="alert alert-info">
# <strong>In the following code snippets we will:</strong>
# <ul>
# <li>Load data from a CSV file</li>
# <li>Plot the data</li>
# </ul>
# </div>

# First, let's begin by importing a selection of Python packages that will prove useful for the rest of this Jupyter notebook.

# +
# Plots will be shown inside the notebook
# %matplotlib notebook
import matplotlib.pyplot as plt

# NumPy is a package for manipulating N-dimensional array objects
import numpy as np

# Pandas is a data analysis package
import pandas as pd

import problem_unittests as tests
# -

# With Pandas we can load the aforementioned CSV data.

# +
# Load data and print the first n = 5 rows
# URL: http://www-bcf.usc.edu/~gareth/ISL/Income1.csv
DATA_URL = './resources/Income1.csv'
data = pd.read_csv(DATA_URL, index_col=0)

print(data.head(n=5))

# Put the 'Education' (education index) and 'Income' (income index) columns into NumPy arrays
X_data = data['Education'].values
y_data = data['Income'].values
# -

# With the data loaded we can plot it as a scatter plot using matplotlib.

# +
plt.figure()

plt.scatter(X_data, y_data, label='Training data')

plt.title('Education vs. Income')
plt.xlabel('Education index')
plt.ylabel('Income index')
plt.grid(linestyle='dotted')
plt.legend()

plt.show()
# -

# ## Modelling
#
# As previously mentioned, we will be using a linear model. That is, the output will be a linear combination of the input plus a bias or intercept:
#
# $$
# \begin{equation*}
# g(\mathbf{x}) = b + \sum_{j=1}^{d}w_jx_j
# \end{equation*}
# $$
#
# Keep in mind that in this problem there is only a single independent variable $\mathbf{x}$, which means the above can be simplified to: $g(x) = b + wx$, where $b$ is the intercept and $w$ is the slope.
#
#
# ### Notational Simplifications
#
# To simplify notation, it is quite common to merge the bias $b$ with the weights $w_i$ to get a single weight vector $\mathbf{w} = (w_0, w_1, \ldots, w_d)^\intercal$, where $w_0 = b$. Consequently, an extra dimension must be prepended to the input vector, i.e. $\mathbf{x} = (1, x_1, \ldots, x_d)^\intercal$.
#
# With this simplification the linear model can be written as:
#
# $$
# \begin{equation*}
# g(\mathbf{x}) = \sum_{j=0}^{d}w_jx_j
# \end{equation*}
# $$
#
#
# #### Matrix Form
#
# The above model takes a single input $\mathbf{x}$ and produces a single output prediction. We can take this one step further by putting all of the input examples in a single matrix called the *design matrix* $\mathbf{X}$. This matrix consists of one (training) example per row.
#
# <br class="math" />
# $$
# \begin{equation*}
# \mathbf{X} =
# \begin{bmatrix}
# 1 & \mathbf{x}_{11} & \cdots & \mathbf{x}_{1d} \\
# \vdots & \vdots & \ddots & \vdots \\
# 1 & \mathbf{x}_{N1} & \cdots & \mathbf{x}_{Nd}
# \end{bmatrix} =
# \left[ \begin{array}{c} \mathbf{x}_{1}^\intercal \\ \vdots\\ \mathbf{x}_{N}^\intercal\end{array} \right]
# \end{equation*}
# $$
# <br class="math" />
#
# With the design matrix, predictions can be done by matrix multiplication:
#
# <br class="math" />
# $$
# \begin{equation*}
# \hat{\mathbf{y}} = \mathbf{X}\mathbf{w} =
# \begin{bmatrix}
# 1 & \mathbf{x}_{11} & \cdots & \mathbf{x}_{1d} \\
# \vdots & \vdots & \ddots & \vdots \\
# 1 & \mathbf{x}_{N1} & \cdots & \mathbf{x}_{Nd}
# \end{bmatrix}
# \left[ \begin{array}{c} \mathbf{w}_{0} \\ \mathbf{w}_{1} \\ \vdots\\ \mathbf{w}_{d}\end{array} \right] =
# \left[ \begin{array}{c} y_{1} \\ y_{2} \\ \vdots\\ y_{N}\end{array} \right]
# \end{equation*}
# $$
# <br class="math" />

# ## Defining an Error Function
#
# To measure how well our hypothesis, i.e. a particular set of weights, approximates the unknown target function $f$ we will have to come up with an error function. This quantification, which we will call $J$, goes by several different names:
#
# * Cost
# * Energy
# * Error
# * Loss
# * Objective
#
# We will be using *squared error*: $(g(\mathbf{x}) - f(\mathbf{x}))^2$ to measure how well our hypothesis approximates $f$. Seeing as we do not have access to $f$ we will instead compute an in-sample squared error over all our training data. This measure is commonly known as *mean squared error* (MSE):
#
# $$
# \begin{equation*}
# J(\mathbf{w}) =
# \frac{1}{N}\sum_{i=1}^{N}(g(\mathbf{x}_i) - y_i)^2 =
# \frac{1}{N}\sum_{i=1}^{N}(\mathbf{w}^\intercal \mathbf{x}_i - y_i)^2 =
# \frac{1}{N}\lVert \mathbf{X}\mathbf{w} - \mathbf{y} \rVert^2
# \end{equation*}
# $$
#
# A simple analogy is to think of mean squared error as a set of springs, one per training example. The objective of the learning algorithm is to balance the learned hyperplane by attempting to push it as close as we can to each of the training samples. Thus, the further a training sample is from our hyperplane, the stronger the force is on its particular spring.
#
# <img src="resources/mse.png" alt="MSE Springs" width="300" />

# ### Minimising the Error Function in Matrix Form
#
# Now, to get a good approximation, we need to select weights $\mathbf{w}$ so that the error $J(\mathbf{w})$ is minimised. This is commonly called *ordinary least squares* or OLS. There are several ways to do this, for example gradient descent; however, for now we will simply take the derivative of $J(\mathbf{w})$ with respect to $\mathbf{w}$ and then equate it to zero to get the closed-form solution.
#
# First though, we need to expand the mean squared error representation so that we can differentiate it. The constant $\frac{1}{N}$ has been removed as it will not impact the selected weights.
# # <br class="math" /> # $$ # \begin{equation*} # \begin{aligned} # J(\mathbf{w}) &= \lVert \mathbf{X}\mathbf{w} - # \mathbf{y}\rVert^2 \\ # & = (\mathbf{X}\mathbf{w} - \mathbf{y})^\intercal(\mathbf{X}\mathbf{w} - # \mathbf{y}) \\ # & = ((\mathbf{X}\mathbf{w})^\intercal - \mathbf{y}^\intercal)(\mathbf{X} # \mathbf{w} - \mathbf{y}) \\ # & = (\mathbf{X}\mathbf{w})^\intercal \mathbf{X}\mathbf{w} - # (\mathbf{X}\mathbf{w})^\intercal \mathbf{y} - \mathbf{y}^\intercal(\mathbf{X} # \mathbf{w}) + \mathbf{y}^\intercal\mathbf{y} \\ # & = \mathbf{w}^\intercal\mathbf{X}^\intercal\mathbf{X}\mathbf{w} - # 2(\mathbf{X}\mathbf{w})^\intercal \mathbf{y} + \mathbf{y}^\intercal\mathbf{y} \\ # & = \mathbf{w}^\intercal\mathbf{X}^\intercal\mathbf{X}\mathbf{w} - # 2\mathbf{y}^\intercal\mathbf{X}\mathbf{w} + \mathbf{y}^\intercal\mathbf{y} # \end{aligned} # \end{equation*} # $$ # <br class="math" /> # # <div class="alert alert-warning"> # Before we move on, here are some useful properties for matrix differentiation: # <ul> # <li>$\frac{\partial \mathbf{w}^\intercal\mathbf{A}\mathbf{w}}{\partial \mathbf{w}} = 2\mathbf{A}^\intercal\mathbf{w}$</li> # </ul> # <ul> # <li>$\frac{\partial \mathbf{B}\mathbf{w}}{\partial \mathbf{w}} = \mathbf{B}^\intercal$</li> # </ul> # </div> # # Let $A = \mathbf{X}^\intercal\mathbf{X}$ and $B = 2\mathbf{y}^\intercal\mathbf{X}$. Substitute and differentiate: # # <br class="math" /> # $$ # \begin{equation*} # \begin{aligned} # \frac{\partial J(\mathbf{w})}{\partial \mathbf{w}} # &= \frac{\partial}{\partial \mathbf{w}} # (\mathbf{w}^\intercal\mathbf{X}^\intercal\mathbf{X}\mathbf{w} - # 2\mathbf{y}^\intercal\mathbf{X}\mathbf{w} + # \mathbf{y}^\intercal\mathbf{y}) \\ # &= \frac{\partial}{\partial \mathbf{w}} # (\mathbf{w}^\intercal A \mathbf{w} - # B\mathbf{w} + # \mathbf{y}^\intercal\mathbf{y}) \\ # &= 2\mathbf{A}^\intercal\mathbf{w} - \mathbf{B}^\intercal + 0 # \end{aligned} # \end{equation*} # $$ # <br class="math" /> # # Now, let's replace $\mathbf{A}$ and $\mathbf{B}$: # # <br class="math" /> # $$ # \begin{equation*} # \frac{\partial J(\mathbf{w})}{\partial \mathbf{w}} # = 2\mathbf{X}^\intercal\mathbf{X}\mathbf{w} - 2\mathbf{X}^\intercal\mathbf{y} # \end{equation*} # $$ # <br class="math" /> # # Finally, let's throw away constant terms, equate to zero, and solve for $\mathbf{w}$: # # <br class="math" /> # $$ # \begin{equation*} # \begin{aligned} # \frac{\partial J(\mathbf{w})}{\partial \mathbf{w}} # &= 0 \\ # \mathbf{X}^\intercal\mathbf{X}\mathbf{w} - \mathbf{X}^\intercal\mathbf{y} &= 0 \\ # \mathbf{X}^\intercal\mathbf{X}\mathbf{w} &= \mathbf{X}^\intercal\mathbf{y} \\ # \mathbf{w} &= (\mathbf{X}^\intercal\mathbf{X})^{-1}\mathbf{X}^\intercal\mathbf{y} # \end{aligned} # \end{equation*} # $$ # <br class="math" /> # # And there we have it, the closed-form solution for ordinary least squares. # # Notice how we have to compute the inverse of a matrix. This means that $\mathbf{X}^\intercal\mathbf{X}$ must be non-singular, however, there are ways to circumvent this issue, for example, by using the Moore-Penrose pseudoinverse instead: `numpy.linalg.pinv()`. # ## Using the Closed-Form Solution # # To use the closed-form solution we derived above to solve the `income` vs. 
`education` problem we require a few things, namely:
#
# * The design matrix $\mathbf{X}$
# * A column vector of ground truths $\mathbf{y}$
# * A function that takes the two aforementioned matrices and evaluates the closed-form solution to get a set of weights $\mathbf{w}$
#
# The last two requirements will have to be implemented by you.
#
# <div class="alert alert-info">
# <strong>In the following code snippet we will:</strong>
# <ul>
# <li>Create the design matrix $\mathbf{X}$</li>
# </ul>
# </div>

# +
def build_X(x_data):
    """Return design matrix given an array of N samples with d dimensions.
    """
    # Create an Nx1 matrix if d = 1
    if x_data.ndim == 1:
        x_data = np.expand_dims(x_data, axis=1)

    # Find the number of samples and dimensions
    nb_samples = x_data.shape[0]
    nb_dimensions = x_data.shape[1]

    # Create Nx(d+1) matrix filled with ones
    _X = np.ones((nb_samples, nb_dimensions + 1))

    # Paste in the data we have in the new matrix
    _X[:nb_samples, 1:nb_dimensions + 1] = x_data

    return _X


# Test and see that the design matrix was built correctly
tests.test_build_x(build_X)
# -

# ### Task I: Build y
#
# The second component we require is the vector $\mathbf{y}$. This is a column vector over all the ground truths or target values in our training dataset. For completeness, it has the following form:
#
# <br class="math" />
# $$
# \begin{equation*}
# \mathbf{y} = \left[ \begin{array}{c} y_{1} \\ y_{2} \\ \vdots\\ y_{N}\end{array} \right]
# \end{equation*}
# $$
# <br class="math" />
#
# <div class="alert alert-success">
# **Task**: Build the $\mathbf{y}$ vector shown above. Use the previous code snippet as a reference for your implementation.
# </div>

# + slideshow={"slide_type": "subslide"}
def build_y(y_data):
    """Return a column vector containing the target values y.
    """
    # Make a copy of the argument that we can work on
    _y = y_data.copy()

    # Create y matrix Nx1

    # Return result
    return _y


### Do *not* modify the following line ###
# Test and see that the y vector was built correctly
tests.test_build_y(build_y)
# -

# ### Task II: Implement Closed-Form Solution
#
# Now that we have both the design matrix $\mathbf{X}$ and the vector of target values $\mathbf{y}$ we can fit a linear model using the closed-form solution we derived before. Remember, all we have to do is implement the following expression:
#
# $$
# \begin{equation*}
# \mathbf{w} = (\mathbf{X}^\intercal\mathbf{X})^{-1}\mathbf{X}^\intercal\mathbf{y}
# \end{equation*}
# $$
#
# Please refer to the following sources for how to utilise the various functions in NumPy when implementing your solution:
#
# * How to perform matrix multiplication in NumPy [np.dot()](https://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html)
# * How to compute the inverse of a matrix in NumPy [np.linalg.inv()](https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.inv.html) or [np.linalg.pinv()](https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.pinv.html)
# * How to transpose a NumPy array [X.T](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.T.html)
#
# <div class="alert alert-success">
# **Task**: Implement a function that evaluates the closed-form solution given a design matrix $\mathbf{X}$ and target vector $\mathbf{y}$.
# </div>

# +
def compute_weights(X, y):
    """Return a vector of weights found by the derived closed-form solution.
    """
    weights = None

    # Implement closed-form solution here

    return weights


### Do *not* modify the following line ###
# Test and see that the weights are calculated correctly
tests.test_compute_theta(compute_weights)
# -

# ### Task III: Learn a Linear Regression Model
#
# We have now implemented all of the necessary building blocks:
#
# * `build_X()` : Used to build the design matrix $\mathbf{X}$
# * `build_y()` : Used to build the vector of target values $\mathbf{y}$
# * `compute_weights()` : Used to fit a linear model to the data using the solution we derived above
#
# After we have estimated $\mathbf{w}$ we can perform predictions on unseen data by computing: $\hat{\mathbf{y}} = \mathbf{X}\mathbf{w}$.
#
# <div class="alert alert-success">
# **Task**: Learn the weights $\mathbf{w}$ given the building blocks we have implemented.
# </div>

# +
# Build design matrix (TASK)
X = None

# Build y vector (TASK)
y = None

# Learn linear model (TASK)
W = None
# -

# <div class="alert alert-info">
# <strong>In the following code snippet we will:</strong>
# <ul>
# <li>Print the weights we learned</li>
# <li>Plot the hyperplane (line in our case because $d=1$) that $\mathbf{w}$ represents</li>
# </ul>
# </div>

# +
# Print weights
print('The learned linear model looks like this:')
print('Y = {:.3f} x + {:.3f}'.format(W[1, 0], W[0, 0]))

# Plot hyperplane and training data
xs = np.linspace(X_data.min(), X_data.max(), num=50)
ys = np.dot(build_X(xs), W)

plt.figure()

plt.scatter(X_data, y_data, label='Training data')
plt.plot(xs, ys, color='Red', linewidth=1, label='Fit')

plt.title('Education vs. Income')
plt.xlabel('Education index')
plt.ylabel('Income index')
plt.grid(linestyle='dotted')
plt.legend()

plt.show()
# -

# ## Critical Analysis
#
# Albeit easy to derive and easy to use, our closed-form solution has a few shortcomings:
#
# * Requires matrix inversion
# * Very computationally expensive
# * Not ideal for distributed computing
# * Issues become apparent when the number of features $d$ and number of samples $N$ begin to grow
# * Depending on the size of the dataset it might be difficult / infeasible to fit all of it in memory
#
# To tackle these issues we will attempt to solve the linear regression problem using an iterative optimisation method called **gradient descent**.

# # Gradient Descent
#
# <div class="alert alert-warning">
# In artificial neural network literature one can see several different symbols in use to signify the error function. For example, in addition to $J$ there is also $E$ (error), $L$ (loss), $C$ (cost), and even $err$. The rest of this notebook will use the symbol $E$ instead of $J$.
# </div>
#
# Gradient descent is an iterative optimisation algorithm. In general, it works by taking the derivative of an error function $E(\mathbf{w})$ with respect to the parameters $\mathbf{w}$ of the model, and then altering the parameters in the direction of the *negative* gradient.
#
# This can be summarised as: $\mathbf{w}(k+1)\leftarrow\mathbf{w}(k) - \eta\frac{\partial E(\mathbf{w})}{\partial\mathbf{w}}$, where $\mathbf{w}(k)$ signifies the state of the model parameters at iteration $k$, and $\eta$ is known as the *learning rate* and decides how much the parameters should change with each application of the rule.
#
# This *update rule* is repeated until convergence or until the maximum number of iterations has been reached.
#
# **With gradient descent we can**:
#
# * Reduce memory issues by only working on parts of the data at a time
# * Distribute the computation among several computational nodes.
This enables distributed computing and parallelisation which allows us to exploit new architectures such as GPUs, FPGAs, and ASICs # * Gradient descent is a heavily use *type* of algorithm that opens the door for models such as artificial neural networks # ## Digression: A Different Perspective # # Linear models, such as linear regression, can be represented as artifical neural networks. An illustration of this can be seen below: # # <img src="resources/linear-regression-net.png" alt="Linear regression as an artificial neural network" width="300" /> # # As before, the input $\mathbf{x} \in \mathbb{R}^d$ and the input is integrated via a linear combination plus a bias. The integrated value is activated by an activation function $\sigma$, which for our linear regression model is defined as $\sigma(x) = x$. # # In other words, $\hat{y}$ is defined as $\sigma(\mathbf{X}\mathbf{w})$, which simplifies to $\mathbf{X}\mathbf{w}$ because the activation function used for linear regression is the identity function. In artificial neural network terminology we would typically say that the activation function is *linear*. # ## Learning with Gradient Descent # # As we saw above, learning with gradient descent is easy. All we have to do is apply an *update rule* a set number of iterations until we are satisfied with the resulting weights. The update rule can be be seen below: # # $$ # \begin{equation*} # \mathbf{w}(k+1)\leftarrow\mathbf{w}(k) - \eta\frac{\partial E(\mathbf{w})}{\partial\mathbf{w}} # \end{equation*} # $$ # # In words, the weights for the next iteration $k+1$ is the weights of the current iteration $k$ plus the *negative* gradient $\frac{\partial E(\mathbf{w})}{\partial\mathbf{w}}$ scaled by the learning rate $\eta$. In other words, for each iteration in gradient descent we adjust the weights we have with respect to the gradient of the error function $E(\mathbf{w})$. # # An illustration of how this could look like with the mean squared error function can be seen below: # # <img src="resources/error-grad.png" alt="MSE gradient" width="300" /> # # The current state of several different weight states are signified by red dots, while the arrow points in the negative gradient direction. The optimal weight state is found at the minima, which yields the lowest amount of error. # ### Finding the Gradient # # To finalise the update rule we need to find: $\frac{\partial E(\mathbf{w})}{\partial\mathbf{w}}$. This, of course, depends on the form of $E(\mathbf{w})$. # # The squared error for a single sample $\mathbf{x}_i$ in the training dataset is defined as: # # $E(\mathbf{w}) = (\hat{y}_i - y_i)^2$ # # where $\hat{y}_i=\sigma(g)$ and $g(\mathbf{x})=\mathbf{w}^\intercal \mathbf{x}_i$. # # To simplify the derivation we will scale the squared error by halving it; this will not change the optimal solution: # # $E(\mathbf{w}) = \frac{1}{2}(\hat{y}_i - y_i)^2$ # # Let's now attempt to find the derivative we need: # # <br class="math" /> # $$ # \begin{equation*} # \frac{\partial E(\mathbf{w})}{\partial\mathbf{w}} # = \frac{\partial}{\partial\mathbf{w}}( \frac{1}{2}(\hat{y}_i - y_i)^2) # \end{equation*} # $$ # <br class="math" /> # # Seeing as $\hat{y}$ is dependent on $\mathbf{w}$ we will need to use the chain rule of calculus. # # <div class="alert alert-warning"> # Let $a(b) = \frac{1}{2}(b)^2$ and $b(\mathbf{w}) = (\hat{y}_i - y_i)$, then $\frac{\partial a}{\partial\mathbf{w}}=\frac{\partial a}{\partial b}\frac{\partial b}{\partial\mathbf{w}}$. 
# </div> # # Therefore: # # <br class="math" /> # $$ # \begin{equation*} # \begin{aligned} # \frac{\partial E(\mathbf{w})}{\partial\mathbf{w}} # &= \frac{\partial a}{\partial b}\frac{\partial b}{\partial\mathbf{w}} \\ # &= (\hat{y}_i - y_i)\frac{\partial}{\partial\mathbf{w}}(\hat{y}_i - y_i) \\ # &= (\hat{y}_i - y_i)((\frac{\partial}{\partial\mathbf{w}}\hat{y}_i) - (\frac{\partial}{\partial\mathbf{w}}y_i)) \\ # &= (\hat{y}_i - y_i)((\frac{\partial}{\partial\mathbf{w}}\hat{y}_i) - 0) \\ # &= (\hat{y}_i - y_i)\frac{\partial}{\partial\mathbf{w}}\hat{y}_i \\ # \end{aligned} # \end{equation*} # $$ # <br class="math" /> # # Keep in mind that: # # * $\hat{y}_i=\sigma(g)$ # * $g(\mathbf{x})=\mathbf{w}^\intercal \mathbf{x}_i$. # # For now, let's replace $\hat{y}$ with $\sigma(g)$: # # <br class="math" /> # $$ # \begin{equation*} # \begin{aligned} # \frac{\partial E(\mathbf{w})}{\partial\mathbf{w}} # &= (\hat{y}_i - y_i)\frac{\partial}{\partial\mathbf{w}}\sigma(g) # \end{aligned} # \end{equation*} # $$ # <br class="math" /> # # Again we have to use the chain rule. # # <div class="alert alert-warning"> # Let $a(b) = \sigma(b)$ and $b(\mathbf{w}) = (\mathbf{w}^\intercal \mathbf{x}_i)$, then $\frac{\partial a}{\partial\mathbf{w}}=\frac{\partial a}{\partial b}\frac{\partial b}{\partial\mathbf{w}}$. # </div> # # <br class="math" /> # $$ # \begin{equation*} # \begin{aligned} # \frac{\partial E(\mathbf{w})}{\partial\mathbf{w}} # &= (\hat{y}_i - y_i)\frac{\partial a}{\partial b}\frac{\partial b}{\partial\mathbf{w}} \\ # &= (\hat{y}_i - y_i)\sigma '(g)\mathbf{x}_i # \end{aligned} # \end{equation*} # $$ # <br class="math" /> # # Thus, the update rule for gradient descent, regardless of activation function, is defined as: # # <br class="math" /> # $$ # \begin{equation*} # \mathbf{w}(k+1) \leftarrow \mathbf{w}(k) - \eta((\hat{y}_i - y_i)\sigma '(g)\mathbf{x}_i) # \end{equation*} # $$ # <br class="math" /> # # Seeing as we're doing linear regression, we know that activation function is linear, i.e. $\sigma(x)=x$, where $\sigma'(x)=1$. So the final update rule will look like this: # # <div class="alert alert-info"> # $$ # \begin{equation*} # \begin{aligned} # \mathbf{w}(k+1) &\leftarrow \mathbf{w}(k) - \eta((\hat{y}_i - y_i)\mathbf{x}_i) \\ # &\leftarrow \mathbf{w}(k) - \eta((\mathbf{w}^\intercal \mathbf{x}_i - y_i)\mathbf{x}_i) # \end{aligned} # \end{equation*} # $$ # </div> # # Note that this updates the weights using only a single input example. This is generally called *stochastic* gradient descent. Typically the amount we adjust by is taken over a *batch*, i.e. subset, of examples. # # For completeness, the update rule above can be defined over a set of $m$ samples like so: # # $$ # \begin{equation*} # \mathbf{w}(k+1) \leftarrow \mathbf{w}(k) - \eta\frac{1}{m}\sum_{i=i}^{m}(\mathbf{w}^\intercal \mathbf{x}_i - y_i)\mathbf{x}_i # \end{equation*} # $$ # ## Gradient Descent with Keras # # Thankfully, when using gradient descent we do not need to derive and implement it ourselves as there are many programming libraries out there that can do automatic differentiation for us. # # In this and future notebooks we will be using the Python library [Keras](https://keras.io/). This is a high-level library for building and training artificial neural networks running on either [TensorFlow](https://www.tensorflow.org/) or [Theano](http://deeplearning.net/software/theano/). 
We will be able to leverage Keras when creating our linear regression model with gradient descent because linear models can be interpreted as artificial neural networks. # # <div class="alert alert-info"> # <strong>In the following code snippets we will:</strong> # <ul> # <li>Create a linear regression model for the `Income` vs. `Education` problem in Keras</li> # <li>Train the model using (stochastic) gradient descent</li> # </ul> # </div> # Let's start by importing the modules we need from Keras as well as some additional ones we will use during training. # + import time # A library for easily displaying progress meters import tqdm # Contains all built-in optimisation tools in Keras, such as stochastic gradient descent from keras import optimizers # An input "layer" and a densely-connected neural network layer from keras.layers import Input, Dense # Model is an API that wraps our linear regression model from keras.models import Model # - # The input to our model is a single scalar value (`Education`). The output is also a single scalar value (`Income`). # + # There is only a *single* feature input_X = Input(shape=(1,)) # The output of the model is a single value output_y = Dense(units=1, use_bias=True)(input_X) # We give the input and output to our Model API model = Model(inputs=input_X, outputs=output_y) # Print a summary of the model model.summary() # - # Notice in the print above how the fully-connected layer `Dense()` has two *trainable* parameters. One is the weight (slope), while the second is the bias (intercept). Keras adds bias units by default, but it can be turned off by setting `use_bias=False`. # # The next thing we have to do in Keras is to set up an *optimiser* (sometimes called *solver*). There are many [alternatives](https://keras.io/optimizers/) to select from, however, we will settle for the stochastic gradient descent algorithm we discussed earlier. # + # # Start by setting some user options # # Learning rate (set very small so we can clearly see the training progress) lr = 0.0001 # Number of times to apply the update rule nb_iterations = 100 # Number of samples to include each iteration (used to compute gradients) nb_samples = 30 # Create optimiser using Keras sgd = optimizers.SGD(lr=lr) # Add the optimiser to our model, make it optimise mean squared error model.compile(optimizer=sgd, loss='mean_squared_error') # - # Now that both the model definition and the optimiser is set up we can start training. Training using the Keras model API is done by calling the `fit()` method. # # Don't worry too much if this code is a bit too much right now. We will get much more experience with using Keras throughout the upcoming notebooks. # # While training the model, a plot is continuously updated to display the fitted line. # + fig, ax = plt.subplots(1,1) # Perform `nb_iterations` update rule applications for i in tqdm.tqdm(np.arange(nb_iterations)): # Learn by calling the `fit` method model.fit(X_data, y_data, batch_size=nb_samples, epochs=1, verbose=0) # Make a plot of the data and the current fit xs = np.linspace(X_data.min(), X_data.max(), num=50) ys = model.predict(xs) ax.clear() ax.scatter(X_data, y_data, label='Training data') ax.plot(xs, ys, color='Red', linewidth=1, label='Fit') ax.set_xlabel('Education index') ax.set_ylabel('Income index') ax.grid(linestyle='dotted') ax.legend() fig.canvas.draw() time.sleep(0.05) # -
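# As a closing check, the parameters found by stochastic gradient descent can be read back out of the Keras model with `get_weights()` and compared against the analytic solution. The sketch below assumes the closed-form tasks earlier in the notebook were completed, so that `W` holds the OLS weights; with enough iterations (or a larger learning rate) the two fits should agree closely.

# +
# Compare the SGD fit with the closed-form OLS weights (assumes W was computed above).
sgd_kernel, sgd_bias = model.get_weights()  # Dense layer: kernel of shape (1, 1), bias of shape (1,)
print('SGD fit:         Y = {:.3f} x + {:.3f}'.format(float(sgd_kernel[0, 0]), float(sgd_bias[0])))
print('Closed-form fit: Y = {:.3f} x + {:.3f}'.format(W[1, 0], W[0, 0]))
# -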
1-regression/1-linear-regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9.5 64-bit # name: python3 # --- # # Potentially inappropriate prescribing of DOACs to people with mechanical heart valves: a federated analysis of 57.9 million patients’ primary care records in situ using OpenSAFELY # National guidance was issued during the COVID-19 pandemic to switch patients on warfarin to direct oral anticoagulants (DOACs) where appropriate as these require less frequent blood testing. DOACs are not recommended for patients with mechanical heart valves. We conducted a retrospective cohort study of DOAC prescribing in people with a record of a mechanical heart valve between September 2019 and May 2021, and describe the characteristics of this population. We identified 15,457 individuals with a mechanical heart valve recorded in their records, of whom 1058 (6.8%) had been prescribed a DOAC during the study period. 767 individuals with a record of a mechanical heart valve were currently prescribed a DOAC as of May 31st 2021. This is suggestive of inappropriate prescribing of DOACs in individuals with mechanical heart valves. Direct alerts have been issued to clinicians through their EHR software informing the issue. We show that the OpenSAFELY platform can be used for rapid audit and feedback to mitigate the indirect health impacts of COVID-19 on the NHS. We will monitor changes in prescribing for this risk group over the following months. # ## Results # ### Monthly Count # The monthly number of DOAC prescriptions in people with a record of a mechanical valve. from IPython.display import Image Image("../released_outputs/count.jpeg") # **Figure 1. The monthly number of people with a record of a mechanical valve who are also prescribed a DOAC between September 2019 and September 2021. *Figure updated on 27-10-21 to include data for 06-21 - 09-21.** # The monthly number of DOAC prescriptions in people with a record of a mechanical valve with mean value in the 6 months prior to and 6 months following advice on the safe switching of patients on warfarin to a DOAC in March 2020 shows an increase from 507 (95% C.I. = 489,525) to 557 (95% C.I. = 538,576) (9.9%). from IPython.display import Image Image("../released_outputs/count_with_mean.jpeg") # **Figure 2. The monthly number of people with a record of a mechanical valve who are also prescribed a DOAC between September 2019 and September 2021 with the mean value 6 months prior to and 6 months after the issuing of national anticoagulation switching guidance. *Figure updated on 27-10-21 to include data for 06-21 - 09-21.** # ### Monthly Rate # The monthly rate of DOAC prescribing per 1000 people with a record of a mechanical valve. Image("../released_outputs/rate.jpeg") # **Figure 3. The monthly rate of people prescribed a DOAC per 1000 people with a record of a mechanical heart valve between September 2019 and September 2021. *Figure updated on 27-10-21 to include data for 06-21 - 09-21.**
analysis/report_notebook.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: finance
#     language: python
#     name: finance
# ---

# # StenoTexty
#
# This notebook analyzes the stenographic transcripts of sessions of the Chamber of Deputies of the Czech Parliament (PS ČR).

# ## Installing the Parlamentikon library
#
# Notebook setup for the Google Colab environment and for running locally.

# Commands specific to the Google Colab environment
if 'google.colab' in str(get_ipython()):
    import os, sys
    os.chdir('/content')
    # Download the library
    # ! ls parlamentikon || git clone "https://github.com/parlamentikon/parlamentikon.git" --branch main
    os.chdir('/content/parlamentikon/notebooks')

instalace_zavislosti = True
if instalace_zavislosti:
    # ! pip install -r ../requirements.txt 1>/dev/null

instalace_knihovny = False
if instalace_knihovny:
    # ! pip install .. 1>/dev/null
else:
    # Add the path for importing the library locally
    import sys, os
    sys.path.insert(0, os.path.abspath('..'))

# ## Importing libraries

from datetime import datetime, timedelta

import plotly.graph_objects as go
import pandas as pd
import numpy as np

from parlamentikon.Hlasovani import Organy
from parlamentikon.Snemovna import *
from parlamentikon.StenoTexty import *
from parlamentikon.utility import *

from nastav_notebook import nastav_pandas
nastav_pandas()

# ## Parameter settings

# The data will be downloaded again from the Chamber of Deputies archive on every run
stahni=True

# ## Loading the data

vsechny_organy = Organy(volebni_obdobi=-1)

snemovny = vsechny_organy[vsechny_organy.nazev_typ_organ_cz == 'Parlament'].od_organ.dt.year.sort_values()
snemovny = snemovny.sort_values().to_list()
posledni_snemovna = snemovny[-1]
assert isinstance(posledni_snemovna, int)
print(f"Snemovny dle počátku roku: {snemovny}.")
print(f"Poslední sněmovna: {posledni_snemovna}.")

# + [markdown] id="LqbC3QodaH-V"
# ### Loading the StenoTexty table
# -

st = StenoTexty(volebni_obdobi=2017, stahni=True)

# ## Analysis

print(f"Počet záznamů v tabulce StenoTexty: {len(st)}")
print(f"Sloupce v tabulce StenoTexty: {st.columns}")

fields = ['schuze', 'id_osoba', 'jmeno', 'prijmeni', 'pohlavi', 'zkratka', 'druh', 'date', 'text', 'poznamka', 'je_poznamka', 'cas', 'typ_casu', 'hlasovani', 'cisla_hlasovani']
st[fields].head(5)

print("Druhy záznamů v tabulce StenoTexty")
print('-'*35)
data = st.groupby('druh').size()
x = data.index
y=data.values
print(data)

fig = go.Figure(go.Bar(x=x, y=y))
fig.update_layout(dict(
    title="Počet promluv dle druhu záznamu",
    xaxis=dict(title="Druh záznamu"),
    yaxis=dict(title="Počet promluv")
))

# ### Can the chairperson also appear as a speaker?
#
# Because the chairpersons may change over the course of a parliamentary term, it is not easy to answer this question in general. For a qualified estimate, we examine the data of the chairperson with the largest number of records.

predsedajici_s_nejvice_zaznamy = st[~st['id_osoba'].isna() & st.druh.isin(['předsedající', 'předsedající (ověřeno)'])].id_osoba.value_counts().sort_values().index[-1]

predsedajici_mluvi_jako_recnik = len(st[(st['id_osoba'] == predsedajici_s_nejvice_zaznamy) & st.druh.isin(['řečník', 'řečník (ověřeno)'])])

if predsedajici_mluvi_jako_recnik == 0:
    print("Je velmi pravděpodobné, že předsedající nevystupují v záznamech v roli řečníků, ale jsou vždy označeni jako předsedající.")
    print("Nelze tedy snadno odlišit, kdy mluví za sebe a kdy jenom organizují dění ve sněmovně")

# A speech (promluva) is a record in the StenoTexty table that was not delivered by the chairperson and is not a note.
promluvy = st[~(st['je_poznamka'] == True) & st.druh.isin(['řečník', 'řečník (ověřeno)'])][fields]
print(f"Bylo identifikováno {len(promluvy)} promluv.")
promluvy.head()

# Number of speeches by political party
data = promluvy.groupby('zkratka').size().sort_values(ascending=False)
x = data.index
y = data.values

fig = go.Figure(go.Bar(x=x, y=y))
fig.update_layout(dict(
    title="Počet promluv dle politické strany",
    xaxis=dict(title="Politická strana"),
    yaxis=dict(title="Počet promluv")
))
fig.show()

data = promluvy.text.str.split(' ').apply(len).groupby(promluvy.zkratka).mean().sort_values()
x = data.index
y = data.values
fig = go.Figure(go.Bar(x=x, y=y))
fig.update_layout(dict(
    title="Průměrná délka promluvy dle politické strany",
    xaxis=dict(title="Politická strana"),
    yaxis=dict(title="Průměrná délka promluvy [počet slov]")
))
fig.show()

# This should be normalized by the number of sitting days in the chamber
K = 20
grouping_field = promluvy.jmeno + ' '+ promluvy.prijmeni + ' (' + promluvy.zkratka + ')'
data = promluvy.text.str.split(' ').apply(len).groupby(grouping_field).sum().sort_values(ascending=False)
x = data.index[:K]
y = data.values[:K]
fig = go.Figure(go.Bar(x=x, y=y))
fig.update_layout(dict(
    title="Poslanci a poslankyně s největším množstvím pronesených slov",
    xaxis=dict(tickangle=45),
    yaxis=dict(title="Počet slov", )
))
fig.show()

K = 20
grouping_field = promluvy.jmeno + ' '+ promluvy.prijmeni + ' (' + promluvy.zkratka + ')'
data = promluvy.text.str.split(' ').apply(len).groupby(grouping_field).sum().sort_values()
x = data.index[:K]
y = data.values[:K]
fig = go.Figure(go.Bar(x=x, y=y))
fig.update_layout(dict(
    title="Poslanci a poslankyně s nejmenším množstvím pronesených slov",
    xaxis=dict(tickangle=45),
    yaxis=dict(title="Počet slov", )
))
fig.show()

print(f"Poslední běh notebooku dokončen {datetime.now().strftime('%d.%m.%Y v %H:%M:%S')}.")

# + active=""
#
notebooks/Otazky_save.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Mixup data augmentation # + hide_input=true from fastai.gen_doc.nbdoc import * from fastai.callbacks.mixup import * from fastai.vision import * # - # ## What is mixup? # This module contains the implementation of a data augmentation technique called [mixup](https://arxiv.org/abs/1710.09412). It is extremely efficient at regularizing models in computer vision (we used it to get our time to train CIFAR10 to 94% on one GPU to 6 minutes). # # As the name kind of suggests, the authors of the mixup article propose training the model on mixes of the training set images. For example, suppose we’re training on CIFAR10. Instead of feeding the model the raw images, we take two images (not necessarily from the same class) and make a linear combination of them: in terms of tensors, we have: # # `new_image = t * image1 + (1-t) * image2` # # where t is a float between 0 and 1. The target we assign to that new image is the same combination of the original targets: # # `new_target = t * target1 + (1-t) * target2` # # assuming the targets are one-hot encoded (which isn’t the case in PyTorch usually). And it's as simple as that. # # ![mixup](imgs/mixup.png) # # Dog or cat? The right answer here is 70% dog and 30% cat! # # As the picture above shows, it’s a bit hard for the human eye to make sense of images obtained in this way (although we do see the shapes of a dog and a cat). However, it somehow makes a lot of sense to the model, which trains more efficiently. One important side note is that when training with mixup, the final loss (training or validation) will be higher than when training without it, even when the accuracy is far better: a model trained like this will make predictions that are a bit less confident. # ## Basic Training # To test this method, we first create a [`simple_cnn`](/layers.html#simple_cnn) and train it like we did with [`basic_train`](/basic_train.html#basic_train) so we can compare its results with a network trained with mixup. path = untar_data(URLs.MNIST_SAMPLE) data = ImageDataBunch.from_folder(path) model = simple_cnn((3,16,16,2)) learn = Learner(data, model, metrics=[accuracy]) learn.fit(8) # ## Mixup implementation in the library # In the original article, the authors suggest four things: # # 1. Create two separate dataloaders, and draw a batch from each at every iteration to mix them up # 2. Draw a value for t following a beta distribution with a parameter alpha (0.4 is suggested in their article) # 3. Mix up the two batches with the same value t # 4. Use one-hot encoded targets # # This module's implementation is based on these suggestions, and modified where experimental results suggested changes that would improve performance. # The authors suggest using the beta distribution with parameters alpha=0.4. (In general, the beta distribution has two parameters, but in this case they're going to be equal.) Why do they suggest this? Well, with the parameters they suggest, the beta distribution looks like this: # # ![betadist](imgs/betadist-mixup.png) # # meaning that there's a very high probability of picking values close to 0 or 1 (in which case the mixed up image is mostly from only one category) and then a somewhat constant, much smaller probability of picking something in the middle (notice that 0.33 is nearly as likely as 0.5, for instance). 
# # While this works very well, it’s not the fastest way, and this is the first suggestion we adjust. The unnecessary slowdown with this approach comes from drawing two different batches at every iteration, which means loading twice the number of images and additionally applying any other data augmentation functions to them. To avoid this, we apply mixup on a batch with a shuffled version of itself: this way, the images mixed up are still different. # # Using the same value of `t` for the whole batch is another suggestion we modify. In our experiments, we noticed that the model trained faster if we drew a different `t` for every image in the batch. (Both options got to the same result in terms of accuracy, it’s just that one arrived there more slowly.) # # Finally, notice that with this strategy we might create duplicate images: let’s say we are mixing `image0` with `image1` and `image1` with `image0`, and that we draw `t=0.1` for the first mix and `t=0.9` for the second. Then # # `image0 * 0.1 + shuffle0 * (1-0.1) = image0 * 0.1 + image1 * 0.9` # # and # # `image1 * 0.9 + shuffle1 * (1-0.9) = image1 * 0.9 + image0 * 0.1` # # will be the same. Of course we have to be a bit unlucky for this to happen, but in practice, we saw a drop in accuracy when we didn't remove duplicates. To avoid this, the trick is to replace the vector of `t` we drew with: # # `t = max(t, 1-t)` # # The beta distribution with the two parameters equal is symmetric in any case, and this way we ensure that the largest coefficient is always near the first image (the non-shuffled batch). # ## Adding mixup to the mix # We now add [`MixUpCallback`](/callbacks.mixup.html#MixUpCallback) to our Learner so that it modifies our input and target accordingly. The [`mixup`](/train.html#mixup) function does this for us behind the scenes, along with a few other tweaks described below: # + hide_input=false model = simple_cnn((3,16,16,2)) learner = Learner(data, model, metrics=[accuracy]).mixup() learner.fit(8) # - # Training with mixup improves the best accuracy. Note that the validation loss is higher than without mixup, because the model makes less confident predictions: without mixup, most predictions are very close to 0. or 1. (in terms of probability) whereas the model with mixup makes predictions that are more nuanced. Before using mixup, make sure you know whether it's more important to optimize lower loss or better accuracy. # + hide_input=true show_doc(MixUpCallback) # - # Create a [`Callback`](/callback.html#Callback) for mixup on `learn` with a parameter `alpha` for the beta distribution. `stack_x` and `stack_y` determine whether we stack our inputs/targets with the vector lambda drawn or do the linear combination. (In general, we stack the inputs or outputs when they correspond to categories or classes and do the linear combination otherwise.) # ### Callback methods # You don't call these yourself - they're called by fastai's [`Callback`](/callback.html#Callback) system automatically to enable the class's functionality. # + hide_input=true show_doc(MixUpCallback.on_batch_begin) # - # Draws a vector of lambda following a beta distribution with `self.alpha` and operates the mixup on `last_input` and `last_target` according to `self.stack_x` and `self.stack_y`. # ## Dealing with the loss # We often have to modify the loss so that it is compatible with mixup. PyTorch was very careful to avoid one-hot encoding targets when possible, so it seems a bit of a drag to undo this. 
Fortunately for us, if the loss is a classic [cross-entropy](https://pytorch.org/docs/stable/nn.html#torch.nn.functional.cross_entropy), we have # # `loss(output, new_target) = t * loss(output, target1) + (1-t) * loss(output, target2)` # # so we don’t one-hot encode anything and instead just compute those two losses and find the linear combination. # # The following class is used to adapt the loss for mixup. Note that the [`mixup`](/train.html#mixup) function will use it to change the `Learner.loss_func` if necessary. # + hide_input=true show_doc(MixUpLoss, title_level=3) # - # ## Undocumented Methods - Methods moved below this line will intentionally be hidden # + hide_input=true show_doc(MixUpLoss.forward) # -
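# For readers who want to see the recipe above collected in one place, here is a minimal, self-contained sketch of the mixing step in plain PyTorch (shuffled batch, a per-image `t` drawn from a beta distribution, and `t = max(t, 1-t)`). It is only an illustration of the idea, not the actual fastai implementation used by [`MixUpCallback`](/callbacks.mixup.html#MixUpCallback).

# +
import torch

def mixup_batch(x, y, alpha=0.4):
    "Illustrative mixup step: returns mixed inputs plus both target sets and the mixing weights."
    t = torch.distributions.Beta(alpha, alpha).sample((x.size(0),))
    t = torch.max(t, 1 - t)                   # keep the larger coefficient on the non-shuffled image
    idx = torch.randperm(x.size(0))           # a shuffled version of the same batch
    t_x = t.view(-1, *([1] * (x.dim() - 1)))  # broadcast t over the image dimensions
    mixed_x = t_x * x + (1 - t_x) * x[idx]
    # the loss is then t * loss(output, y) + (1-t) * loss(output, y[idx])
    return mixed_x, y, y[idx], t
# -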
docs_src/callbacks.mixup.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + id="29202d68" import numpy as np import matplotlib.pyplot as plt # %matplotlib inline import matplotlib as mpl # + id="8f403909" # data copied-and-pasted b/c I was confused on how to read the csv file year = [2019, 2018, 2017, 2016, 2015, 2014, 2013, 2012, 2011, 2010, 2009, 2008, 2007, 2006, 2005, 2004, 2003, 2002, 2001, 2000, 1999, 1998, 1997, 1996, 1995, 1994, 1993, 1992, 1991, 1990] power_alameda = [1.219644505, 1.186228519, 1.264777462, 1.231874982, 1.168422944, 1.175785136, 1.212175942, 1.209013196, 1.253362435, 1.223996689, 1.182478663, 1.276024233, 1.339936621, 1.276506869, 1.263246745, 1.218165789, 1.209988745, 1.190065831, 1.163078534, 1.217141746, 1.216532475, 1.139830269, 1.19841083, 1.185426237, 1.12538686, 1.077945598, 1.117580562, 1.060219557, 1.064473029, 1.096754055] power_sanmateo = [0.4937533529, 0.4858005828, 0.4974260016, 0.4956347078, 0.5062807011, 0.5087955508, 0.5173882833, 0.5153553197, 0.5219611221, 0.5441921083, 0.5675114887, 0.5795212978, 0.5708548056, 0.5332836849, 0.5187733736, 0.5437126854, 0.5265624475, 0.4811527559, 0.4828658192, 0.5489501515, 0.5231460969, 0.5061371983, 0.4973037249, 0.4703170002, 0.4590977992, 0.4500218154, 0.4515484748, 0.4461150382, 0.4440786231, 0.4529199782] power_losangeles = [7.547793722, 7.751995733, 7.837525135, 7.9239777, 7.937476414, 7.985550131, 7.805171052, 7.907689663, 7.785088207, 7.792016886, 7.985113916, 8.228524426, 8.134186536, 8.097540943, 7.989779733, 7.901020024, 7.821636651, 7.730021729, 7.961344337, 7.927376964, 7.615674385, 7.500729192, 7.518751431, 7.32656969, 7.130828252, 7.166858867, 7.100540271, 7.223044243, 7.159228419, 7.391300796] # questions: how to read csv file? should data be lists or something else? # + id="c6754b88" outputId="af51a566-02eb-40eb-c3c5-f8f0ba279754" # calculating averages def average(): sum = 0 for item in year: sum = sum + year[item]*power_alameda[item] sum = sum/30 return sum # questions: cannot put '=' in line 3 (like the format we had in the notes)? how to fix error in line 4? how to make it so # the 'power' data can be exchanged for a different data set? # + id="4c1a5738" #creating plots mpl.rcParams['font.family']='serif' mpl.rcParams['font.size']=14 def InitializePlot(): plt.clf() f= plt.figure(figsize=(8,8)) plt.xlim(1990, 2019) plt.ylim(1.0, 1.3) plt.xlabel('year') plt.ylabel('power (MW)') # questions: how to change y limits for each data set? # + id="563d2153" # creating plots (continued) f = InitializePlot() plt.scatter(year, power, c = 'gray', s = 3, edgecolors = 'none') # questions: 'year' and 'power' need to change, but I'm not sure what they should be? how to run this for each county # data set? # + id="9548171b" # best-fit line
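# One possible way to address the questions in the `average()` cell above (a sketch, not necessarily
# the intended calculation): loop over the values themselves instead of using them as list indices,
# and pass the data in as a parameter so any county's series can be reused.

# +
def average(data):
    total = 0
    for value in data:          # 'value' is each power reading, not an index
        total = total + value
    return total / len(data)

print(average(power_alameda))   # works the same for power_sanmateo or power_losangeles
# -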
modules/module1/1a/SIP Task 1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import requests from IPython.display import display import pegasus_functions as pf from datetime import datetime eov_packet = pf.get_eov_packet() obis_areas = requests.get("https://api.obis.org/area").json() # + area_stats_cache = list() for i_eov,eov in enumerate(eov_packet): if i_eov > 0: break for i_area,area in enumerate(obis_areas["results"]): if i_area > 4: break area_stats = pf.summary_stats_by_aphiaids(pf.get_worms_info(eov["name"])["valid_aphiaids"], summary_type="statistics/all", area_id=area["id"]) for k, v in area.items(): area_stats[f"area_{k}"] = v for k, v in eov.items(): area_stats[f"eov_{k}"] = v area_stats["area_records_per_year"] = pf.summary_stats_by_aphiaids(pf.get_worms_info(eov["name"])["valid_aphiaids"], summary_type="statistics/years", area_id=area["id"]) area_stats["date_cached"] = datetime.utcnow().isoformat() area_stats_cache.append(area_stats) display(area_stats_cache) print(len(area_stats_cache)) # -
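# The WoRMS lookup above is repeated for every area even though it only depends on the EOV. A small optional refactor (a sketch, assuming `pf.get_worms_info` returns the same result for a given name) is to resolve the aphiaids once per EOV before entering the area loop:

# +
def stats_for_eov(eov, areas):
    # Resolve the aphiaids once, then reuse them for every area
    valid_aphiaids = pf.get_worms_info(eov["name"])["valid_aphiaids"]
    results = []
    for area in areas:
        stats = pf.summary_stats_by_aphiaids(valid_aphiaids, summary_type="statistics/all", area_id=area["id"])
        stats["area_records_per_year"] = pf.summary_stats_by_aphiaids(valid_aphiaids, summary_type="statistics/years", area_id=area["id"])
        results.append(stats)
    return results
# -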
Area Stats.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- def carre(x): resultat = x*x return resultat val1 = carre(4) val2 = carre(16) print("val1", val1) print("val2", val2) def carre(x): return x*x val3 = carre(8) print(val3)
37_return.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- SAMPLE_PATH = 'D:/OneDrive/Desktop/siim-acr-pneumothorax-segmentation/sample images/*.dcm' DATASET_PATH = 'D:/OneDrive/Desktop/siim-acr-pneumothorax-segmentation/dicom-images-train/*/*/*.dcm' TEST_PATH = 'D:\OneDrive\Desktop\siim-acr-pneumothorax-segmentation\dicom-images-test\*\*.dcm' RLE_SAMPLE = 'D:/OneDrive/Desktop/siim-acr-pneumothorax-segmentation/sample images/train-rle-sample.csv' RLE_PATH = 'D:/OneDrive/Desktop/siim-acr-pneumothorax-segmentation/train-rle.csv' import os import pydicom import pandas as pd import numpy as np import tensorflow as tf import matplotlib.pyplot as plt from collections import defaultdict from pydicom.data import get_testdata_files from glob import glob from tqdm import tqdm # + # def Read_dcm(dataset): # print("Filename\t\t\t:", DATASET_PATH) # print("Storage\t\t\t\t:", dataset.SOPClassUID) # name = dataset.PatientName # display_name = name.family_name + ", " + name.given_name # print("Patient's Name\t\t\t:", display_name) # print("Patient's ID\t\t\t:", dataset.PatientID) # print("Patient's Age\t\t\t:", dataset.PatientAge) # print("Patient's Sex\t\t\t:", dataset.PatientSex) # print("Modality\t\t\t:", dataset.Modality) # print("Body Part Examined\t\t:", dataset.BodyPartExamined) # print("View Position\t\t\t:", dataset.ViewPosition) # if 'PixelData' in dataset: # rows = int(dataset.Rows) # cols = int(dataset.Columns) # print("Image size\t\t\t: {rows:d} x {cols:d}, {size:d} bytes".format(rows=rows, cols=cols, size=len(dataset.PixelData))) # if 'PixelSpacing' in dataset: # print("Pixel spacing\t\t\t:", dataset.PixelSpacing) # print('\n') # - def PixelArray(dataset, figsize=(10,10)): plt.figure(figsize=figsize) plt.imshow(dataset.pixel_array, cmap=plt.cm.bone) print(dataset.pixel_array) print(plt.cm.bone) plt.show() # + def mask2rle(img, width, height): rle = [] lastColor = 0; currentPixel = 0; runStart = -1; runLength = 0; for x in range(width): for y in range(height): currentColor = img[x][y] if currentColor != lastColor: if currentColor == 255: runStart = currentPixel; runLength = 1; else: rle.append(str(runStart)); rle.append(str(runLength)); runStart = -1; runLength = 0; currentPixel = 0; elif runStart > -1: runLength += 1 lastColor = currentColor; currentPixel+=1; return " ".join(rle) def rle2mask(rle, width, height): mask= np.zeros(width* height) array = np.asarray([int(x) for x in rle.split()]) starts = array[0::2] lengths = array[1::2] current_position = 0 for index, start in enumerate(starts): current_position += start mask[current_position:current_position+lengths[index]] = 255 current_position += lengths[index] return mask.reshape(width, height) # - # filename = get_testdata_files('rtplan.dcm')[0] files = sorted(glob(DATASET_PATH)) len(files) RLEs = pd.read_csv(RLE_PATH) len(RLEs) RLEsL = defaultdict(list) for image_id, rle in zip(RLEs['ImageId'], RLEs[' EncodedPixels']): RLEsL[image_id].append(rle) annotated = {k: v for k, v in RLEsL.items() if v[0] != ' -1'} print("%d of %d images are annotated (Positive) " % (len(annotated), len(RLEsL))) print('Missing values are: (DROP THESE)', len(files) - len(RLEsL)) # + # means = [] # for fn in tqdm(files): # img = pydicom.read_file(fn).pixel_array # means.append(img.mean()) # + # print("Test mean: ", np.mean(means)) # + # for file_path in glob(SAMPLE_PATH): # dataset = 
pydicom.dcmread(file_path) # Read_dcm(dataset) # PixelArray(dataset) # break # + start = 0 numberOfImages = 10 # fig, ax = plt.subplots(nrows = 1, ncols = numberOfImages, sharey=True, figsize=(numberOfImages*10, 10)) # for q, path in enumerate(glob(SAMPLE_PATH)[start:start+numberOfImages]): # dataset = pydicom.dcmread(path) # ax[q].imshow(dataset.pixel_array, cmap = plt.cm.bone) # - fig, ax = plt.subplots(nrows = 1, ncols = numberOfImages, sharey=True, figsize=(numberOfImages*10, 10)) for q, file in enumerate(glob(DATASET_PATH)[start:start+numberOfImages]): dataset = pydicom.dcmread(file) ax[q].imshow(dataset.pixel_array, cmap=plt.cm.bone) if RLEs.loc[file.split('\\')[-1][:-4],1] != '-1': mask = rle2mask(RLEs.loc[file.split('\\')[-1][:-4],1], 1024, 1024).T ax[q].set_title('See Marker') ax[q].imshow(mask, alpha=0.7, cmap='Greens') else: ax[q].set_title('Nothing to see')
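# Note that the `.loc` lookup in the cell above assumes `RLEs` can be indexed by image id. With the dataframe as read in (a default integer index, the RLE string in the ' EncodedPixels' column), a small preparation step along these lines would be needed first; this is only a sketch of one way to do it.

# +
# Re-index the RLE table by image id so a mask can be looked up per DICOM file
rle_by_image = RLEs.set_index('ImageId')[' EncodedPixels']
# example lookup for one path `fn`: rle_by_image.get(fn.split('\\')[-1][:-4], ' -1')
# -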
ATTEMPT102.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # NumPy Basics

# Importing
import numpy as np

# ## Creation of np.array

# Creation of np.array from a list
list_a = [1, 2, 3]
a = np.array(list_a)
print(a)
print(type(a))
print(a.shape)

# 2D array
list_b = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
b = np.array(list_b)
print(b)
print(type(b))
print(b.shape)

# To create arrays of 0s and 1s
zero_array = np.zeros((31))
one_array = np.ones((3,4))
zero_array[30] = 1
print(zero_array)
print(one_array)

# Generating random numbers
c = np.random.randint(low=1, high=10, size=(3,3))
d = np.random.rand(8, 3)
e = np.random.random_sample((3, 4))
print(c,"\n")
print(d,"\n")
print(e)

# ## Reshaping arrays

# Reshaping arrays into the required shape
d_1 = np.reshape(d, newshape=(6, 4))
# In order to reshape from 8x3 to 6x4, the size can also be given as (6, -1)
# The remaining factor (in place of -1) is automatically computed
d_2 = d.reshape((6, -1))
print(d_1,"\n")
print(d_2,"\n")
print(d_1 == d_2)

# +
# Task: Try to reshape the array into a 3D array of shape 2x3x4
d_3 = np.reshape(d, newshape=(2,3,4))
print(d_3)
# -

# ## Deletion of elements

# Deletion of rows/columns
d_col_delete = np.delete(d, 1, axis=1)
d_row_delete = np.delete(d, 2, axis=0)
print(d_col_delete.shape)
print(d_row_delete.shape)

# +
# Learn the concepts of axis properly
# Check StackOverflow or NumPy documentation for more details

# +
# Task: Delete a particular element of array `c`
# -

# ## Merging Rows or Columns

# Merging Rows/Columns
merge_rows = np.vstack((c, d))
merge_cols = np.hstack((c, e))
print(merge_rows)
print(merge_cols)

# +
# Task: Try using np.concatenate to perform the above operations
# -

# # Aggregate Functions

# +
# Task: Find the row-wise minimum of the array `c`

# Task: Find the column-wise mean of array `d`

# Task: Find the row-wise mean of array `e`

# Task: Find the maximum of all values in the array `c`
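# A quick reference for the `axis` argument used in the tasks above (an illustrative example, not the task solutions): `axis=0` collapses the rows and gives one result per column, while `axis=1` collapses the columns and gives one result per row.

# +
ref = np.array([[1, 2, 3],
                [4, 5, 6]])
print(ref.min(axis=0))   # column-wise minimum -> [1 2 3]
print(ref.min(axis=1))   # row-wise minimum    -> [1 4]
print(ref.mean())        # mean of all values  -> 3.5
# -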
Lookups/NumPyBasics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## File input / output # # see also: https://github.com/ValRCS/ValRCS-LU_PySem_2020_2/blob/master/core/Python%20Reading%20Writing%20Files.md # # Python File Reading / Writing # # * https://docs.python.org/3/tutorial/inputoutput.html#reading-and-writing-files # * https://realpython.com/read-write-files-python/ # * https://realpython.com/working-with-files-in-python/ # * https://www.datacamp.com/community/tutorials/reading-writing-files-python # # * https://www.pythonforbeginners.com/files/reading-and-writing-files-in-python # # * http://geospatialtraining.com/file-handling-with-python-for-gis-programmers/ # # # !ls ../data/ # !ls -la ../data/ f1 = open("../data/Veidenbaums.txt", "r") type(f1) f1.readline() for t in f1.readlines(): print(t) f1.seek(0) for line in f1: print(line, end="") f1.close() with open("../data/Veidenbaums.txt") as f1: for line in f1: print(line, end="") with open("../data/Veidenbaums.txt") as f1: data = f1.readlines() data[:10] with open("../data/Veidenbaums_clean.txt", "w") as f1: clean_data = [item for item in data if len(item.strip()) > 0] len(clean_data), len(data) clean_data[:10] with open("../data/Veidenbaums_clean.txt", "w") as f1: f1.writelines(clean_data) # !cat ../data/Veidenbaums_clean.txt with open("../data/Veidenbaums.txt", "r") as f1, \ open("../data/Veidenbaums_clean.txt", "w") as out_f: for line in f1: if len(line.strip()) > 0: out_f.write(line) # !cat ../data/Veidenbaums_clean.txt help(open)
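# One extra detail worth knowing for text like this (the file very likely contains non-ASCII Latvian characters): `open()` uses the platform's default encoding unless told otherwise, so it is safer to pass `encoding` explicitly. A small example, assuming the file is stored as UTF-8:

with open("../data/Veidenbaums.txt", encoding="utf-8") as f1:
    print(f1.readline())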
libraries/File input and output.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import networkx as nx
import random
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats

# # SMALL WORLD

# For each N in Nlist I generate 2 graphs.
# For each realization of the experiment I append a barabasi_albert graph.
# Create several experiments for every parameter value we are interested in.
# Generation is very fast.

Nlist=[100,250,500,750,1000,1250,1500,1750,2000]
NREPL=2
m=3

#graph generation
graphs={}
for nn in Nlist:
    print(nn)
    graphs[nn]=[]
    for r in range(NREPL):
        graphs[nn].append(nx.barabasi_albert_graph(nn,3))

# I compute the average shortest path length (avShPath) of all these networks and average over the ones that share the same parameter. For example, for 500 nodes I have 2 values of avShPath and I take the mean of those two values.

avShPath=[]
for nn in Nlist:
    print(nn)
    l=[]
    for G in graphs[nn]:
        l.append(nx.average_shortest_path_length(G))
    avShPath.append(np.mean(l))

# On linear axes we get a curve, and on semi-logarithmic axes we get this: a logarithmic growth, so the Barabási–Albert model shows the **small world** effect.

# +
plt.plot(Nlist,avShPath,'o-',markersize=10)
plt.xlabel('N',size=15)
plt.ylabel('<l>',size=15)
plt.show()

plt.semilogx(Nlist,avShPath,'o-',markersize=10)
plt.xlabel('N',size=15)
plt.ylabel('<l>',size=15)
plt.title('it is a small world network')
plt.show()
# -

# # clustering

# Now I want to look at the clustering. We proceed the same way: we compute the clustering of each network and take the mean over the experiments. Here, however, the clustering becomes very small for the very large networks. So the clustering is not particularly high for this type of network.

clustering=[]
for nn in Nlist:
    print(nn)
    c=[]
    for G in graphs[nn]:
        clustList=[nx.clustering(G,i) for i in G.nodes()]
        c.append(np.mean(clustList))
    clustering.append(np.mean(c))

plt.plot(Nlist,clustering,'o-',markersize=10)
plt.xlabel('N',size=15)
plt.ylabel('<c>',size=15)
plt.title('clustering goes to 0')
plt.show()

# # degree Mixing

# Do high-degree nodes tend to be connected to other high-degree nodes, and low-degree nodes to low-degree ones (assortativity)? To check this we look at the average degree of the neighbors.

N=5000
m=3
Nrepl=10

# Note: there is a newer networkx function that measures the degree of the neighbors. Worth a look; it is not used here.

knn=[]
kk=[]
for r in range(Nrepl):
    G=nx.barabasi_albert_graph(N,3)
    knn=knn+[ np.mean([G.degree(v) for v in G.neighbors(u)]) for u in G.nodes()]
    kk=kk+[G.degree(u) for u in G.nodes()]

# The mean corresponds to the orange points. In a Barabási–Albert network there is no degree correlation, even though there are "rich clubs" of connected high-degree nodes (re-listen to the lecture). If you need to estimate the relation at the highest degrees, the logarithmically binned means below are the way to do it.

# +
plt.loglog(kk,knn,'o',alpha=0.1)
logBins=np.logspace(np.log2(np.min(kk)),np.log2(np.max(kk)),base=2,num=15) #if I use np.linspace I will have linear bins
ybin,xbin,binnumber=scipy.stats.binned_statistic(kk,knn,statistic='mean',bins=logBins)
plt.loglog(xbin[:-1],ybin,'o',markersize=10)
plt.xlabel('k',size=15)
plt.ylabel('knn(k)',size=15)
plt.title('there is not degree correlation')
plt.show()
# -
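# For reference, the networkx helpers mentioned above can compute the same quantities directly; this is just a sketch on a single realization.

# +
G = nx.barabasi_albert_graph(N, m)
knn_per_node = nx.average_neighbor_degree(G)    # {node: mean degree of its neighbours}
print(nx.degree_assortativity_coefficient(G))   # expected to be close to zero: no degree correlation
# -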
CC6/BAnetworks_properties_KA.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <!--NOTEBOOK_HEADER--> # *This notebook contains material from [Controlling Natural Watersheds](https://jckantor.github.io/Controlling-Natural-Watersheds); # content is available [on Github](https://github.com/jckantor/Controlling-Natural-Watersheds.git).* # <!--NAVIGATION--> # < [Rainy River Flows](http://nbviewer.jupyter.org/github/jckantor/Controlling-Natural-Watersheds/blob/master/notebooks/A.05-Rainy_River_Flows.ipynb) | [Contents](toc.ipynb) | [USGS Surface Water Daily Data](http://nbviewer.jupyter.org/github/jckantor/Controlling-Natural-Watersheds/blob/master/notebooks/A.07-USGS_Surface_Water_Daily_Data.ipynb) ><p><a href="https://colab.research.google.com/github/jckantor/Controlling-Natural-Watersheds/blob/master/notebooks/A.06-Namakan_Lake_Outflows.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open in Google Colaboratory"></a><p><a href="https://raw.githubusercontent.com/jckantor/Controlling-Natural-Watersheds/master/notebooks/A.06-Namakan_Lake_Outflows.ipynb"><img align="left" src="https://img.shields.io/badge/Github-Download-blue.svg" alt="Download" title="Download Notebook"></a> # # Namakan Lake Outflows # The purpose of this notebook is to create a data series for the outflows from Namakan Lake. The data series is written to a file `NL_outflow.pkl` that can be read into other notebooks using `NL = pd.read_pickle('../data/NL_outflow.pkl')`. # # The data series is constructed using data provided by <NAME> of the International Joint Commission. # ## Read Data # # + # Display graphics inline with the notebook # %matplotlib notebook # Standard Python modules import numpy as np import matplotlib.pyplot as plt import pandas as pd import os import datetime import requests # Modules to display images and data tables from IPython.display import Image from IPython.core.display import display NL_outflow = pd.read_excel('../data/Namakan_Outflows.xls',index_col=0,header=3)['Stn 47'] # + plt.figure(figsize=(10,5)) plt.hold(True) NL_outflow.plot() plt.hold(False) plt.xlabel('Year') plt.ylabel('Level [meters]') plt.grid() # - RLLevelFlow = pd.read_csv('../data/RLLevelFlow.csv',index_col=0,parse_dates=True) RL_inflow = RLLevelFlow['Inflow'] # + plt.figure(figsize=(10,5)) plt.hold(True) NL_outflow.plot() RL_inflow.plot() plt.hold(False) plt.xlabel('Year') plt.ylabel('Level [meters]') plt.grid() # + flows = pd.concat([RL_inflow,NL_outflow],axis=1).dropna() plt.figure(figsize=(10,5)) plt.hold(True) flows['Inflow'].plot() flows['Stn 47'].plot() # + plt.figure(figsize=(10,5)) plt.hold(True) plt.plot(flows.ix['1970':'1999','Inflow'],flows.ix['1970':'1999','Stn 47'],'.',ms=5,color='b',alpha=0.6) plt.plot(flows.ix['2000':,'Inflow'],flows.ix['2000':,'Stn 47'],'.',ms=5,color='r',alpha=0.6) plt.xlim(0,2000) plt.ylim(0,1000) plt.grid() plt.title('Namakan Outflow vs Rainy Lake Inflow, Daily for 1970-2014') plt.ylabel('Namakan Outflow [cubic meters/sec]') plt.xlabel('Rainy Lake Inflow [cubic meters/sec]') # - q = (flows['Stn 47']/flows['Inflow']) plt.ylim(0,1) q.plot(makers='.') plt.figure(figsize=(10,5)) q = q[q > 0] q = q[q < 1] q.hist(bins=100,normed=True) plt.xlim(0,1) print q.mean() # ## Data Reconciliation NL.to_pickle('../data/NL.pkl') # <!--NAVIGATION--> # < [Rainy River 
Flows](http://nbviewer.jupyter.org/github/jckantor/Controlling-Natural-Watersheds/blob/master/notebooks/A.05-Rainy_River_Flows.ipynb) | [Contents](toc.ipynb) | [USGS Surface Water Daily Data](http://nbviewer.jupyter.org/github/jckantor/Controlling-Natural-Watersheds/blob/master/notebooks/A.07-USGS_Surface_Water_Daily_Data.ipynb) ><p><a href="https://colab.research.google.com/github/jckantor/Controlling-Natural-Watersheds/blob/master/notebooks/A.06-Namakan_Lake_Outflows.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open in Google Colaboratory"></a><p><a href="https://raw.githubusercontent.com/jckantor/Controlling-Natural-Watersheds/master/notebooks/A.06-Namakan_Lake_Outflows.ipynb"><img align="left" src="https://img.shields.io/badge/Github-Download-blue.svg" alt="Download" title="Download Notebook"></a>
notebooks/A.06-Namakan_Lake_Outflows.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import matplotlib.pyplot as plt import pickle import load from utils import match import numpy as np wdir = '/home/hoseung/Work/data/05427/' nout=187 is_gal=True # Load galaxy catalog cat = pickle.load(open(wdir + '/catalog_GM/' + 'catalog' + str(nout) + '.pickle', 'rb')) # Load tree brick (GM) import tree.halomodule as hmo info = load.info.Info(nout=nout, base=wdir, load=True) hh = hmo.Halo(base=wdir, nout=nout, halofinder='HM', info=info, load=True, is_gal=is_gal) hind = match.match_list_ind(hh.data['id'], cat['id']) h = hmo.Halo(base=wdir, nout=nout, halofinder='HM', info=info, is_gal=is_gal) h.derive_from(hh, hind) # + x = np.log10(cat['mstar']) y = np.log10(h.data['m']) fig, ax = plt.subplots(1) ax.scatter(x,y) ax.plot([9,11.5],[9,11.5]) ax.set_xlabel("lambda_mp") ax.set_ylabel("GalaxyMaker") for name, xx,yy in zip(cat['id'],x,y): ax.annotate(str(name), xy=(xx, yy), xytext=(-15, 10), textcoords='offset points') #arrowprops=dict(facecolor='black', shrink=0.05) ) plt.show() # - fig, ax = plt.subplots(1) x = h.data['rvir'] y = h.data['r'] ax.scatter(x,y) ax.plot([0,0.1],[0,0.1]) ax.set_xlim([0,0.001]) ax.set_ylim([0,0.001]) ax.set_xlabel("rvir_GM") ax.set_ylabel("r_GM") for name, xx,yy in zip(cat['id'],x,y): ax.annotate(str(name), xy=(xx, yy), xytext=(-15, 10), textcoords='offset points') #arrowprops=dict(facecolor='black', shrink=0.05) ) plt.show()
scripts/notebooks/halo/Compare_cat_GM.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + # default_exp metrics # - # # Metrics # # > This contains metrics not included in fastai. #export import sklearn.metrics as skm from fastai.metrics import * from tsai.imports import * #export mk_class('ActivationType', **{o:o.lower() for o in ['No', 'Sigmoid', 'Softmax', 'BinarySoftmax']}, doc="All possible activation classes for `AccumMetric") #export def MatthewsCorrCoefBinary(sample_weight=None): "Matthews correlation coefficient for single-label classification problems" return AccumMetric(skm.matthews_corrcoef, dim_argmax=-1, activation=ActivationType.BinarySoftmax, thresh=.5, sample_weight=sample_weight) #export def get_task_metrics(dls, binary_metrics=None, multi_class_metrics=None, regression_metrics=None, verbose=True): if dls.c == 2: pv('binary-classification task', verbose) return binary_metrics elif dls.c > 2: pv('multi-class task', verbose) return multi_class_metrics else: pv('regression task', verbose) return regression_metrics # All metrics applicable to multi classification have been created by <NAME> (https://github.com/williamsdoug). Thanks a lot Doug!! # + #export def accuracy_multi(inp, targ, thresh=0.5, sigmoid=True, by_sample=False): "Computes accuracy when `inp` and `targ` are the same size." if sigmoid: inp = inp.sigmoid() correct = (inp>thresh)==targ.bool() if by_sample: return (correct.float().mean(-1) == 1).float().mean() else: inp,targ = flatten_check(inp,targ) return correct.float().mean() def metrics_multi_common(inp, targ, thresh=0.5, sigmoid=True, by_sample=False): "Computes TP, TN, FP, FN when `inp` and `targ` are the same size." if not by_sample: inp,targ = flatten_check(inp,targ) if sigmoid: inp = inp.sigmoid() pred = inp>thresh correct = pred==targ.bool() TP = torch.logical_and(correct, (targ==1).bool()).sum() TN = torch.logical_and(correct, (targ==0).bool()).sum() incorrect = pred!=targ.bool() FN = torch.logical_and(incorrect, (targ==1).bool()).sum() FP = torch.logical_and(incorrect, (targ==0).bool()).sum() N = targ.size()[0] return N, TP, TN, FP, FN def precision_multi(inp, targ, thresh=0.5, sigmoid=True): "Computes precision when `inp` and `targ` are the same size." inp,targ = flatten_check(inp,targ) if sigmoid: inp = inp.sigmoid() pred = inp>thresh correct = pred==targ.bool() TP = torch.logical_and(correct, (targ==1).bool()).sum() FP = torch.logical_and(~correct, (targ==0).bool()).sum() precision = TP/(TP+FP) return precision def recall_multi(inp, targ, thresh=0.5, sigmoid=True): "Computes recall when `inp` and `targ` are the same size." inp,targ = flatten_check(inp,targ) if sigmoid: inp = inp.sigmoid() pred = inp>thresh correct = pred==targ.bool() TP = torch.logical_and(correct, (targ==1).bool()).sum() FN = torch.logical_and(~correct, (targ==1).bool()).sum() recall = TP/(TP+FN) return recall def specificity_multi(inp, targ, thresh=0.5, sigmoid=True): "Computes specificity (true negative rate) when `inp` and `targ` are the same size." 
inp,targ = flatten_check(inp,targ) if sigmoid: inp = inp.sigmoid() pred = inp>thresh correct = pred==targ.bool() TN = torch.logical_and(correct, (targ==0).bool()).sum() FP = torch.logical_and(~correct, (targ==0).bool()).sum() specificity = TN/(TN+FP) return specificity def balanced_accuracy_multi(inp, targ, thresh=0.5, sigmoid=True): "Computes balanced accuracy when `inp` and `targ` are the same size." inp,targ = flatten_check(inp,targ) if sigmoid: inp = inp.sigmoid() pred = inp>thresh correct = pred==targ.bool() TP = torch.logical_and(correct, (targ==1).bool()).sum() TN = torch.logical_and(correct, (targ==0).bool()).sum() FN = torch.logical_and(~correct, (targ==1).bool()).sum() FP = torch.logical_and(~correct, (targ==0).bool()).sum() TPR = TP/(TP+FN) TNR = TN/(TN+FP) balanced_accuracy = (TPR+TNR)/2 return balanced_accuracy def Fbeta_multi(inp, targ, beta=1.0, thresh=0.5, sigmoid=True): "Computes Fbeta when `inp` and `targ` are the same size." inp,targ = flatten_check(inp,targ) if sigmoid: inp = inp.sigmoid() pred = inp>thresh correct = pred==targ.bool() TP = torch.logical_and(correct, (targ==1).bool()).sum() TN = torch.logical_and(correct, (targ==0).bool()).sum() FN = torch.logical_and(~correct, (targ==1).bool()).sum() FP = torch.logical_and(~correct, (targ==0).bool()).sum() precision = TP/(TP+FP) recall = TP/(TP+FN) beta2 = beta*beta if precision+recall > 0: Fbeta = (1+beta2)*precision*recall/(beta2*precision+recall) else: Fbeta = 0 return Fbeta def F1_multi(*args, **kwargs): return Fbeta_multi(*args, **kwargs) # beta defaults to 1.0 # - #export def mae(inp,targ): "Mean absolute error between `inp` and `targ`." inp,targ = flatten_check(inp,targ) return torch.abs(inp - targ).mean() #export def mape(inp,targ): "Mean absolute percentage error between `inp` and `targ`." inp,targ = flatten_check(inp, targ) return (torch.abs(inp - targ) / torch.clamp_min(targ, 1e-8)).mean() # + #export def _recall_at_specificity(inp, targ, specificity=.95, axis=-1): inp0 = inp[targ == 0] inp1 = inp[targ == 1] thr = torch.sort(inp0).values[-int(len(inp0) * (1 - specificity))] return (inp1 > thr).float().mean() recall_at_specificity = AccumMetric(_recall_at_specificity, specificity=.95, activation=ActivationType.BinarySoftmax, flatten=False) # + #export def _mean_per_class_accuracy(y_true, y_pred, *, labels=None, sample_weight=None, normalize=None): cm = skm.confusion_matrix(y_true, y_pred, labels=labels, sample_weight=sample_weight, normalize=normalize) return (cm.diagonal() / cm.sum(1)).mean() mean_per_class_accuracy = skm_to_fastai(_mean_per_class_accuracy) # - #hide from tsai.imports import * from tsai.export import * nb_name = get_nb_name() create_scripts(nb_name);
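# A tiny sanity check of the multi-label helpers defined above (illustrative only): two samples and two labels, with logits chosen so the thresholded predictions match the targets exactly, so accuracy, precision and recall should all equal 1.
inp_demo = torch.tensor([[ 2., -2.], [-2., 2.]])
targ_demo = torch.tensor([[1., 0.], [0., 1.]])
accuracy_multi(inp_demo, targ_demo), precision_multi(inp_demo, targ_demo), recall_multi(inp_demo, targ_demo)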
nbs/051_metrics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import time import astropy import itertools import numpy as np import pylab as pl import matplotlib.pyplot as plt import astropy.units as u from astropy.cosmology import FlatLambdaCDM from IPython.display import YouTubeVideo from tools.flops import flops # + # %matplotlib notebook plt.style.use('dark_background') # + [markdown] lang="es" # # DESI y la supercomputadora más rápida de Occidente # + [markdown] lang="es" # Entender _cómo_ los 30 millones de galaxias estudiadas por DESI se formaron en el Universo es difícil, realmente difícil. De hecho, es tan difícil que DESI explota [Summit](https://www.olcf.ornl.gov/summit/), la supercomputadora más rápida del mundo en el Laboratorio Nacional de Oak Ridge para calcular cómo debería verse la distribución de las galaxias según el tipo de Energía Oscura: # - # <img src="images/summit.jpg" alt="Drawing" style="width: 800px;"/> # + [markdown] lang="es" # Con un costo de construcción de 325 millones de dólares, Summit es capaz de calcular $1.486 \times 10^{17}$ operaciones de suma y multiplicación por segundo, equivalente a $1.486 \times 10^{11}$ MegaFlops o MFLOPS. A modo de comparación, veamos qué proporciona Binder (necesitarás un poco de paciencia, tal vez quieras dejar esto para más adelante): # - _ = flops() # + [markdown] lang="es" # ¡Así que Summit es al menos mil millones de veces más poderoso! Con Summit, podemos resolver los detalles más finos de la distribución de _materia oscura_ que trazan todas las galaxias: # - # <img src="images/abacus.png" alt="Drawing" style="width: 600px;"/> # + [markdown] lang="es" # Aquí, las regiones más brillantes significan las regiones más densas de materia oscura en el Universo, en las que esperamos encontrar más galaxias. El siguiente video muestra que hemos observado esta estructura predicha en la distribución de galaxias reales, observadas con experimentos antes de DESI: # - YouTubeVideo('08LBltePDZw', width=800, height=400) # + [markdown] lang="es" # [La Materia Oscura](https://es.wikipedia.org/wiki/Materia_oscura) es un elemento omnipresente en nuestro Universo, que constituye el 25% de la densidad (de energía) total. Con la Energía Oscura y los átomos comunes (materia bariónica) formando el resto. No sabemos casi nada sobre la materia oscura, más allá de su atracción gravitacional a otra materia y luz en el Universo. # + [markdown] lang="es" # Afortunadamente, las ecuaciones que describen la evolución de la materia oscura, a diferencia de la [formación compleja de galaxias](https://es.wikipedia.org/wiki/Formación_y_evolución_de_las_galaxias) (ver [también](https://www.space.com/15680-galaxies.html)), son relativamente simples para el Universo en el que vivimos. Todo lo que se requiere es rastrear la atracción gravitacional de las partículas de materia oscura (en un Universo en expansión). # + [markdown] lang="es" # Podemos predecir la evolución de la materia oscura muestreando la fuerza gravitacional, la velocidad y la posición con un conjunto de partículas (ficticias) que representan cada una un "grupo" de materia oscura con una masa total. Por supuesto, esto significa que no podemos resolver la distribución de la materia oscura dentro de estas regiones del tamaño de grupos, pero solo la distribución entre aglomerados conduce a la estructura que puede verse arriba. 
Con Summit, el grupo más pequeño que podemos resolver no está lejos de la masa del 'halo' de materia oscura que rodea el centro de nuestra propia [Vía Láctea](https://manuastronomo.wordpress.com/2019/03/11/hubble-y-gaia-pesan-con-precision-la-via-lactea/): # - # <img src="images/MilkyWay.jpg" alt="Drawing" style="width: 1000px;"/> # + [markdown] lang="es" # Para empezar, colocaremos inicialmente un conjunto de grupos en posiciones aleatorias dentro de un cubo 3D y les daremos velocidades iniciales cero. Las velocidades se generarán en momentos posteriores, ya que la atracción gravitacional ($1/r^2$) de una partícula hacia todas las demás provoca una aceleración neta. # - def init_dof(npt=1): #Crea un conjunto de partículas con posiciones aleatorias en una caja, #las cuales predicen la distribución de materia oscura, como vemos arriba. xs = np.random.uniform(0., 1., npt) ys = np.random.uniform(0., 1., npt) zs = np.random.uniform(0., 1., npt) pos = np.vstack((xs, ys, zs)).T vel = np.zeros_like(pos) return pos, vel # + [markdown] lang="es" # La fuerza gravitacional experimentada por cada partícula de materia oscura es $F = \frac{GmM}{r^2} \hat r$, la ley de gravitación Universal de [Newton](https://en.wikipedia.org/wiki/Isaac_Newton), con la que puede que estés familiarizado. Solo necesitamos hacer un trabajo minucioso en la contabilidad requerida para calcular la fuerza total experimentada por una partícula debido a todas las demás: # - def g_at_particle(index, pos, mass, epsilon=1.0, doimages=True): # Ecuación 10 de http://www.skiesanduniverses.org/resources/KlypinNbody.pdf # Para español puedes ver también https://rmf.smf.mx/pdf/rmf/38/5/38_5_701.pdf indices = np.arange(len(pos)) rest = pos[indices != index,:] #Aquí epsilón es un factor de control para prevenir que las partículas se acerquen, y la fuerza gravitacional sea infinita. result = mass * np.sum((rest - pos[index]) / ((rest - pos[index])**2. + epsilon**2.)**(3./2.), axis=0) if doimages: # Nuestra simulación asume condiciones de frontera periódicas, para la aceleración de cada partícula # está la correspondiente aceleración de una imagen de la partícula producida al aplicar cambios periódicos en su posición. shift = np.array([-1, 0, 1]) images = [] for triple in itertools.product(shift, repeat=3): images.append(triple) images.remove((0, 0, 0)) images = np.array(images) for image in images: result += mass * np.sum((rest + image - pos[index]) / ((rest + image - pos[index])**2. + epsilon**2.)**(3./2.), axis=0) return result # + [markdown] lang="es" # En un experimento notable en 1941, <NAME> utilizó el hecho de que el brillo de la luz decae con la distancia al mismo ritmo ($1/r^2$) que la gravedad. Para calcular la fuerza total sobre una 'partícula' en su 'simulación', Erik colocó una bombilla en la posición de cada partícula y calculó la fuerza efectiva sobre una partícula dada midiendo el brillo total en cada punto. La siguiente figura ilustra esta idea. ¡Intenta ejecutar la siguiente celda varias veces! Obtendrás un diseño aleatorio diferente de bombillas cada vez. # + fig, ax = plt.subplots(1, 1, figsize=(5,5)) xmin, xmax, ymin, ymax = (0., 1., 0., 1.) N = 100 xx, yy = np.meshgrid(np.linspace(xmin, xmax, N), np.linspace(ymin, ymax, N)) epsilon = 0.1 weights = np.zeros_like(xx) pos, vel = init_dof(npt=10) for par in pos: weights += 1. / ((xx - par[0])**2 + (yy - par[1])**2 + epsilon**2.) 
ax.imshow(weights, extent=(xmin, xmax, ymin, ymax), cmap=plt.cm.afmhot, alpha=0.5, origin='lower') ax.plot(xx[:,0], xx[:,1], '.', c='darkorange', alpha=0.75) ax.tick_params(labelbottom=False, labelleft=False) ax.set_xlim(0., 1.) ax.set_ylim(0., 1.) plt.axis('off') # + [markdown] lang="es" # Este trabajo fue el concepto original de las simulaciones gravitacionales de 'n-cuerpos' que se describen aquí. ¡Es casi un crimen que solo 118 autores hayan hecho referencia a esta innovadora idea! # - # <img src="images/Holmberg.png" alt="Drawing" style="width: 800px;"/> # + [markdown] lang="es" # Hoy, dadas las mini supercomputadoras que tenemos a menudo al alcance de la mano, podemos determinar la distribución final de la materia oscura con mayor precisión con computadoras que con bombillas. Evolucionando una distribución homogénea inicial (una distribución casi uniforme de aglomeraciones de materia oscura, como el universo produjo en el Big Bang) bajo la gravedad, podemos predecir con precisión la ubicación de las galaxias (los lugares donde se forman los grupos de materia oscura más grandes). # # Para hacer esto, solo necesitamos calcular la aceleración de cada partícula en una serie de pasos de tiempo y actualizar la velocidad y la posición de acuerdo con la aceleración que experimenta la partícula. Habrás experimentado esto como la sensación que sientes cuando un automóvil dobla una esquina o acelera. # + # Haremos un muestreo de las ecuaciones de movimiento en pasos de tiempo discretos. dt = 1.e-2 nsteps = 500 timesteps = np.linspace(0.0, nsteps * dt, nsteps) dt = np.diff(timesteps)[0] mass = 0.25 # + [markdown] lang="es" # Ahora simplemente tenemos que ejecutar la simulación, # + fig, ax = plt.subplots(1,1, figsize=(8,8)) ax.tick_params(labelbottom=False, labelleft=False) plt.axis('off') # Reinitialise particles. pos, vel = init_dof(npt=50) for index_in_timestep , time in enumerate(timesteps): pl.cla() ax.text(0.01, 1.05, '$t={:d}$'.format(index_in_timestep), transform=ax.transAxes) dvel = np.zeros_like(vel) dpos = np.zeros_like(pos) for index_in_particle, _ in enumerate(pos): g_X = g_at_particle(index_in_particle, pos, mass, doimages=True) # Update velocities. dvel[index_in_particle] = dt * g_X # Update positions. dpos[index_in_particle] = dt * vel[index_in_particle] vel += dvel pos += dpos # Our simulation has periodic boundaries, if you go off one side you come back on the other! pos = pos % 1. ax.plot(pos[:,0], pos[:,1], '.', c='darkorange', alpha=0.75) ax.set_xlim(0., 1.) ax.set_ylim(0., 1.) fig.canvas.draw() # + [markdown] lang="es" # ¡Intenta jugar con la configuración! Sin embargo, más de 100 partículas no funcionarán muy bien. Con la configuración predeterminada, encontrarás que las partículas tienden a caer en uno o dos cúmulos en poco tiempo. Esto se debe al arrastre que aplicamos. El arrastre simula el efecto que tiene el universo en expansión sobre las partículas reales de materia oscura, que es alentarlas y hacer que se agrupen. Estos grupos se conocen como halos y forman criaderos galácticos donde el gas puede acumularse para formar nuevas estrellas y galaxias. # # Ahora, cuando DESI ejecuta grandes simulaciones, como las que se ejecutan en Summit, se resuelve un total de ~ 48 _billones de_ partículas. ¡No intentes esto aquí! 
Pero los resultados son realmente extraordinarios (¡salte al minuto 6 y 45 segundos si estás impaciente por ver el resultado!): # - YouTubeVideo('LQMLFryA_7k', width=800, height=400) # + [markdown] lang="es" # Con este gran éxito, viene una responsabilidad adicional. Esta informática de alto rendimiento, si bien es fantástica para DESI y la ciencia, ahora tiene una [huella de carbono](https://es.wikipedia.org/wiki/Huella_de_carbono) comparable a la [industria mundial de las aerolíneas](https://www.hpcwire.com/solution_content/ibm/cross-industry/five-tips-to-reduce-your-hpc-carbon-footprint/) ([aquí](https://porelclima.es/equipo/2873-centros-de-datos-sostenibles-reduciendo-la-huella-de-internet) un artículo similar en español) y consume la misma cantidad de electricidad que el país de Irán (¡82 millones de personas!). # # Más preocupante aún, esto pronto crecerá del 2% del consumo de energía del mundo a ~ 30%. ¡Una tasa extraordinaria! # + [markdown] lang="es" # Afortunadamente, Summit también se encuentra entre las supercomputadoras más ecológicas. Sus 14,7 GFlops/watt la pone número 1 en la [lista global Green 500 2019](https://www.top500.org/lists/green500/2019/06/). ¡Mejor suerte el año que viene, E.U.! # - # <img src="images/Amazon.jpg" alt="Drawing" style="width: 800px;"/>
Espanol/nbody_es.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import requests import pprint import json url = 'http://weather.livedoor.com/forecast/webservice/json/v1' params = {'city': 130010} r = requests.get(url, params=params) print(r.headers['Content-Type']) json_data = r.json() print(type(json_data)) pprint.pprint(json_data, depth=2, compact=True) print(json_data['description']['text']) pprint.pprint(json_data['forecasts'][0]) with open('data/temp/download.json', 'w') as f: json.dump(json_data, f, ensure_ascii=False, indent=4)
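# For completeness, the saved file can be read back into a dictionary with `json.load`; the round trip should reproduce the same data.

with open('data/temp/download.json') as f:
    loaded = json.load(f)

print(loaded == json_data)
print(loaded['description']['text'])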
notebook/requests_json.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # **Percentiles** For example, let's consider the sizes of the five largest continents – Africa, Antarctica, Asia, North America, and South America – rounded to the nearest million square miles. import numpy as np sizes = np.array([12, 17, 6, 9, 7]) sizes # The `80th` percentile is the smallest value that is at least as large as `80%` of the elements of sizes # # **Step 1**: sort the list in ascending order <br/> # **Step 2**: grasp `80%` of the elements from left to right sorted_sizes = np.sort(sizes) sorted_sizes number_of_elements = 0.8*(len(sizes)-1) number_of_elements # `80th` percentile is at index `3th` (round down) or the number `12` sorted_sizes[3] # `80th` percentile is at index `4th` (round up) or the number `17` sorted_sizes[4] # **Handling with floating rank** number_of_elements = 0.7*(len(sizes)-1) number_of_elements # round it up, becomes index `3th`; then `70th` percentile is at number `12` sorted_sizes[3] # **Interpoate ("linear" approach) with floating rank** # **Step 1**: Determine the elements at the calculated rank using fomular `r=p(n-1)`; `70th` is at r=0.7*(5-1)=2.8; Example, rank `2.8` means that positions of elements `2th` and `3th` which are `9` and `12`, respectively # **Step 2**: Take the difference between these two elements and multiply it by the fractional portion of the rank. For our example, this is: `(12 – 9)0.8 = 2.4`. <br/> # **Step 3**: Take the lower-ranked value in **Step 1** and add the value from **Step 2** to obtain the interpolated value for the percentile. For our example, that value is `9 + 2.4 = 11.4`. # **Usig numpy and pandas** np.percentile(sizes, 80, interpolation='linear') np.percentile(sizes, 70, interpolation='linear') # + import pandas as pd my_data = { "Size": sizes } df = pd.DataFrame(my_data) df # - df["Size"].quantile(0.8, interpolation='linear') df["Size"].quantile(0.7, interpolation='linear') # **Other example** import pandas as pd scores_and_sections = pd.read_csv('scores_by_section.csv') scores_and_sections scores_and_sections['Midterm'].hist(bins=np.arange(-0.5, 25.6, 1)) scores_and_sections['Midterm'].quantile(0.85) # **Quantiles** scores_and_sections['Midterm'].quantile(0.25) scores_and_sections['Midterm'].quantile(0.50) scores_and_sections['Midterm'].quantile(0.75) scores_and_sections['Midterm'].quantile(1) scores_and_sections['Midterm'].max() # **Bootstrap** We study the `Total Compensation` column df = pd.read_csv("san_francisco_2015.csv") df # we will focus our attention on those who had at least the equivalent of a half-time job for the whole year. At a minimum wage of about `$10` per hour, and `20` hours per week for `52` weeks, that's a salary of about `$10,000`. df = df.loc[df["Salaries"] > 10000] df # Visualize the histogram my_bins = np.arange(0, 700000, 25000) df['Total Compensation'].hist(bins=my_bins) # **Compute the median** pop_median = df['Total Compensation'].median() pop_median df['Total Compensation'].quantile(0.50) # **Now we estimate this value using bootstrap (resampling)** # + my_bins = np.arange(0, 700000, 25000) our_sample = df.sample(500, replace=False) our_sample['Total Compensation'].hist(bins=my_bins) # - est_median = our_sample['Total Compensation'].median() est_median our_sample['Total Compensation'].quantile(0.50) # The sample size is large. 
By the law of averages, the distribution of the sample resembles that of the population, and consequently the sample median is not very far from the population median (though of course it is not exactly the same). # So now we have one estimate of the parameter. But had the sample come out differently, the estimate would have had a different value. We would like to be able to quantify the amount by which the estimate could vary across samples. That measure of variability will help us measure how accurately we can estimate the parameter. # ## Bootstrap method # * Treat the original sample as if it were the population. # * Draw from the sample, at random with replacement, the same number of times as the original sample size. resample_1 = our_sample.sample(frac=1.0, replace=True) resample_1['Total Compensation'].hist(bins=my_bins) # Compute the median of the new sample resample_1['Total Compensation'].median() resample_2 = our_sample.sample(frac=1.0, replace=True) resampled_median_2 = resample_2['Total Compensation'].median() resampled_median_2 # Resampling `5,000` times bstrap_medians = [] for i in range(1, 5000+1): one_resample = our_sample.sample(frac=1.0, replace=True) one_median = one_resample['Total Compensation'].median() bstrap_medians.append(one_median) # + my_median_data = { "Median": bstrap_medians } median_df = pd.DataFrame(my_median_data) median_df # - median_df.hist() # + import matplotlib.pyplot as plt plt.hist(bstrap_medians) plt.xlabel("Median") plt.ylabel("Frequency") plt.show() # + plt.hist(bstrap_medians, zorder=1) plt.xlabel("Median") plt.ylabel("Frequency") plt.scatter(pop_median, 0, color='red', s=30, zorder=2); plt.show() # - # Let's check whether the middle `95%` of the resampled medians contains the red dot left = median_df.quantile(0.025) left right = median_df.quantile(0.975) right # **The population median of `$110,305` is between these two numbers. The interval and the population median are shown on the histogram below.** # + plt.hist(bstrap_medians, zorder=1) plt.xlabel("Median") plt.ylabel("Frequency") plt.plot([left, right], [0, 0], color='yellow', lw=3, zorder=2) plt.scatter(pop_median, 0, color='red', s=30, zorder=3); plt.show() # - # So, the "middle 95%" interval of estimates captured the parameter in our example. # **Let's repeat the process 100 times to see how frequently the interval contains the parameter.** We will store the left and right ends from each simulation.
def bootstrap_sample(our_sample): bstrap_medians = [] for i in range(1, 5000+1): one_resample = our_sample.sample(frac=1.0, replace=True) one_median = one_resample['Total Compensation'].median() bstrap_medians.append(one_median) return bstrap_medians # + left_ends = [] right_ends = [] for i in range(1, 100+1): our_sample = df.sample(500, replace=False) bstrap_medians = bootstrap_sample(our_sample) my_median_data = { "Median": bstrap_medians } median_df = pd.DataFrame(my_median_data) left = median_df['Median'].quantile(0.025) right = median_df['Median'].quantile(0.975) left_ends.append(left) right_ends.append(right) # + my_left_right = { "Left": left_ends, "Right": right_ends } left_right_df = pd.DataFrame(my_left_right) left_right_df # - good_experiments = left_right_df[(left_right_df["Left"] < pop_median) & (left_right_df["Right"] > pop_median)] good_experiments # + for i in np.arange(100): left = left_right_df.at[i, "Left"] right = left_right_df.at[i, "Right"] plt.plot([left, right], [i, i], color='gold') plt.plot([pop_median, pop_median], [0, 100], color='red', lw=2) plt.xlabel('Median (dollars)') plt.ylabel('Replication') plt.title('Population Median and Intervals of Estimates') plt.show() # - # In other words, this process of estimation captures the parameter about `92%` of the time.
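# As promised in the percentile section above, a minimal sketch of the linear-interpolation rule `r = p*(n-1)`, written with plain numpy so the result can be compared against `np.percentile`. The helper name `percentile_interpolated` is ours, not part of any library.

# +
import numpy as np

def percentile_interpolated(values, p):
    """Linear-interpolation percentile: rank r = (p/100)*(n-1) on the sorted values."""
    xs = np.sort(np.asarray(values, dtype=float))
    r = (p / 100) * (len(xs) - 1)
    lo = int(np.floor(r))
    hi = int(np.ceil(r))
    frac = r - lo
    # lower value plus the fractional part of the gap to the next value
    return xs[lo] + (xs[hi] - xs[lo]) * frac

sizes = np.array([12, 17, 6, 9, 7])
print(percentile_interpolated(sizes, 70), np.percentile(sizes, 70))  # both 11.4
print(percentile_interpolated(sizes, 80), np.percentile(sizes, 80))  # both 13.0
# -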
1-Lessons/Lesson17/OriginalPowerpoint/.ipynb_checkpoints/bootstrap-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:.conda-thesis] * # language: python # name: conda-env-.conda-thesis-py # --- # + # pytorch and fastai from fastai import * from fastai.vision import * from fastai.metrics import accuracy from sklearn.datasets import fetch_mldata from sklearn.model_selection import train_test_split pd.set_option('display.max_colwidth', -1) import os; os.chdir("/home/athon/thesis/notebooks") from tools.config import TCGA_MUT,GENE2VEC,FIGURES from tools.somatic import process_labels,filter_variants,filter_genes, \ visualise_distributions, dedup, reshape_pivot, \ convert_to_onehot, get_label_df os.chdir("/home/athon/thesis/notebooks/1_distributed_genes") # + def freememory(): """ Run garbage collection to free up memory. """ import gc gc.collect() def freegpu(data, model): del data del model freememory() torch.cuda.empty_cache() freememory() # + def train_stage1(data_dir, name, arch=models.resnet34, cycles=10, bs=32): data = (ImageList.from_folder(data_dir + '/') .split_by_folder(train='train', valid='valid') .label_from_folder() .databunch(bs=bs) .normalize(imagenet_stats) ) learn = cnn_learner(data, arch, metrics=[accuracy], callback_fns=ShowGraph) learn.fit_one_cycle(cycles,1e-3) learn.save('ONCO128_{}'.format(name)) preds,y = learn.get_preds() top1 = accuracy(preds,y) top5 = top_k_accuracy(preds,y,5) print('stage1 acc: {}, top5: {}'.format(top1,top5)) # Free up gpu and memory freegpu(data, learn) return top1,top5 def train_data(archs, cycles=1): data_dir = "../../data/gene_images/2d_clustering/ONCO_DIM_128" results = {'model':[],'dataset':[],'top1':[],'top5':[]} # iterate architectures for name, arch in archs: # derive image size and batch size from the directory name dim_ = int(''.join(x for x in data_dir if x.isdigit())) batch_size = int(2**15 / dim_) print("dir: {} bs:{}".format(data_dir, batch_size)) top1,top5 = train_stage1(data_dir, name, arch, cycles, bs=batch_size) results['model'].append(name) results['dataset'].append(data_dir.split('/')[-1]) results['top1'].append(top1.item()) results['top5'].append(top5.item()) return pd.DataFrame(results) # + # Model intersections archs = [('resnet18',models.resnet18), ('resnet34',models.resnet34), ('resnet101',models.resnet101)] results_df = train_data(archs, cycles=10) # - results_df best_exp = "../../data/gene_images/2d_clustering/ONCO_DIM_128" data = (ImageList.from_folder(best_exp + '/') .split_by_folder(train='train', valid='valid') .label_from_folder() .databunch(bs=64) .normalize(imagenet_stats) ) arch = models.resnet101 learn = cnn_learner(data, arch, metrics=[accuracy], callback_fns=ShowGraph) learn.fit_one_cycle(10, 0.01) fig = learn.recorder.plot_losses(return_fig=True) fig.set_size_inches(16, 8) best_exp = "../../data/gene_images/2d_clustering/ONCO_DIM_PC50" data = (ImageList.from_folder(best_exp + '/') .split_by_folder(train='train', valid='valid') .label_from_folder() .databunch(bs=64) .normalize(imagenet_stats) ) arch = models.resnet101 learn = cnn_learner(data, arch, metrics=[accuracy], callback_fns=ShowGraph) learn.fit_one_cycle(10, 0.01) fig = learn.recorder.plot_losses(return_fig=True); fig.set_size_inches(16, 8) learn.fit_one_cycle(10, 0.005) fig = learn.recorder.plot_losses(return_fig=True); fig.set_size_inches(16, 8)
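# An optional follow-up sketch (not part of the original run): fastai v1's ClassificationInterpretation can summarise where the last trained learner struggles. This assumes `learn` from the cells above is still in memory; the class and method names below are standard fastai v1 API.

# +
# Inspect the most confused gene-image classes for the last learner.
interp = ClassificationInterpretation.from_learner(learn)
interp.plot_confusion_matrix(figsize=(10, 10), dpi=80)

# Pairs of classes that are most frequently mixed up (at least 5 times).
interp.most_confused(min_val=5)
# -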
notebooks/1_distributed_genes/fastai_run3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd # + # Make a dataframe to use in the subsequent steps of the analysis df = pd.DataFrame({"S.aureus": [239, 213, 178, 160], "A.baumanii": [246, 190, 151, 147], "E.coli": [283, 207, 163, 191], "K.pneumoniae": [232, 168, 124, 155], "S.pneumoniae": [345, 334, 394, 481], "H.pylori": [434, 401, 342, 326]}, index = ["G", "A", "T", "C"]) # A different version of the above dataframe df2 = pd.DataFrame({"G":[246,283,434,232,239,345], "A":[190,207,401,168,213,334], "T":[151,163,342,124,178,394], "C":[147,191,326,155,160,481], "organism":["A.baumanii","E.coli","H.pylori","K.pneumoniae" ,"S.aureus","S.pneumoniae"]}) print (df) print ("") print (df2) # - # %store df # %store df2
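# A brief usage note (an addition, not part of the original cell): the `%store` magic above persists the objects via IPython's storemagic extension, and they can be pulled back into a different notebook, or after a kernel restart, with `%store -r`.

# +
# Restore the stored frames by name.
# %store -r df
# %store -r df2
print(df.shape, df2.shape)
# -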
storedf.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os from tqdm.notebook import tqdm from pathlib import Path import pandas as pd from nltk.lm import Vocabulary import pickle import sys import torch import importlib from timeit import default_timer as t sys.path.append("../../lib") from metrics import levenshtein import ocr_correction from pytorch_decoding import seq2seq folder = "../../data/ICDAR2019_POCR_competition_dataset/ICDAR2019_POCR_competition_evaluation_4M_without_Finnish/ES/" files = sorted(os.listdir(folder)) len(files) # + import glob files = glob.glob(folder + '/**/*.txt', recursive=True) len(files) # - data = [] for f in tqdm(files): with open(f) as file: data.append(file.readlines()) # + data = pd.DataFrame(data, columns = ["ocr_to_input", "ocr_aligned", "gs_aligned"])\ .assign(ocr_to_input = lambda df: df.ocr_to_input.str.replace("[OCR_toInput] ", "", regex = False), ocr_aligned = lambda df: df.ocr_aligned.str.replace("[OCR_aligned] ", "", regex = False), gs_aligned = lambda df: df.gs_aligned.str.replace("[ GS_aligned] ", "", regex = False)) print(data.shape) data.head() # - data.applymap(len).describe() vocabulary = Vocabulary(data.ocr_to_input.sum() + data.ocr_aligned.sum() + data.gs_aligned.sum()) print(len(vocabulary)) distances = levenshtein(reference = data.gs_aligned.str.replace("@", ""), hypothesis = data.ocr_to_input) distances.cer.describe() distances = levenshtein(reference = data.gs_aligned, hypothesis = data.ocr_to_input) distances.cer.describe() data.to_pickle("../../data/es/data/test.pkl")
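# An illustrative sketch (an addition; it does not use the repo's own `levenshtein` helper from `lib/metrics`, which is what produced the numbers above): character error rate is edit distance divided by the reference length. The tiny dynamic-programming implementation below only shows what the metric computes.

# +
def edit_distance(ref, hyp):
    """Plain Levenshtein distance between two strings (dynamic programming)."""
    prev = list(range(len(hyp) + 1))
    for i, r in enumerate(ref, start=1):
        curr = [i]
        for j, h in enumerate(hyp, start=1):
            cost = 0 if r == h else 1
            curr.append(min(prev[j] + 1,          # deletion
                            curr[j - 1] + 1,      # insertion
                            prev[j - 1] + cost))  # substitution
        prev = curr
    return prev[-1]

reference = "el gato negro"
hypothesis = "el qato negr0"
cer = edit_distance(reference, hypothesis) / len(reference)
print(f"CER: {cer:.3f}")
# -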
notebooks/es/5_test_set.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + def fatorial(n, show=False): ''' -> Computes the factorial of a number :param n: The number to be calculated :param show: (optional) Whether or not to show the calculation. :return: The value of the factorial ''' f = 1 for c in range(n, 0, -1): if show: print(f'{c}', end='') if c > 1: print(' x ', end='') else: print(' = ', end='') f *= c return f print(fatorial(5, show=True)) # -
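# A small sanity check added here (not part of the original exercise): the function should agree with `math.factorial` for non-negative inputs.

# +
import math

for n in range(0, 8):
    assert fatorial(n) == math.factorial(n), n
print('fatorial matches math.factorial for n = 0..7')
# -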
.ipynb_checkpoints/EX102 - Função para Fatorial-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="rtiYQNdwGeFs" # # QuickDraw Data # # If machine learning is rocket science then data is your fuel! So before # doing anything we will have a close look at the data available and spend # some time bringing it into the "right" form (i.e. # [tf.train.Example](https://www.tensorflow.org/api_docs/python/tf/train/Example)). # # That's why we start by spending quite a lot of time on this notebook, downloading # the data, understanding it, and transforming it into the right format for # Tensorflow. # # The data used in this workshop is taken from Google's quickdraw (click on # the images to see loads of examples): # # https://quickdraw.withgoogle.com/data # # We will download the data below. # + [markdown] id="wLhwRewZcVl8" colab_type="text" # ## Init # + [markdown] id="Sv44gRqoyCBi" colab_type="text" # First, we'll choose where our data should be stored. # # If you choose a path under **"/content/gdrive/My Drive"** then data will be stored in your Google drive and persisted across VM starts (preferable). # + id="twZEazVEiS4W" colab_type="code" colab={} data_path = '/content/gdrive/My Drive/amld_data' # Alternatively, you can also store the data in a local directory. This method # will also work when running the notebook in Jupyter instead of Colab. # data_path = './amld_data # + id="HPH_5nWRfTfH" colab_type="code" outputId="870f4a8f-beaa-442d-d454-a9d2b1e2bd3d" executionInfo={"status": "ok", "timestamp": 1579903238813, "user_tz": 480, "elapsed": 42334, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAYx167H2vNmFSKlsQkQY-bjbJ-3sPGymaG0kXO=s64", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 125} if data_path.startswith('/content/gdrive/'): from google.colab import drive assert data_path.startswith('/content/gdrive/My Drive/'), 'Google Drive paths must start with "/content/gdrive/My Drive/"!' drive.mount('/content/gdrive') if data_path.startswith('gs://'): from google.colab import auth auth.authenticate_user() # + id="54oWbpOKxU6u" colab_type="code" outputId="947e1ab9-ff1c-40cb-ac6c-2ec3b45a0f9b" executionInfo={"status": "ok", "timestamp": 1579903238814, "user_tz": 480, "elapsed": 42327, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAYx167H2vNmFSKlsQkQY-bjbJ-3sPGymaG0kXO=s64", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 34} # In Jupyter, you would need to install TF 2 via !pip. # %tensorflow_version 2.x # + id="3vi-RvO1w0VJ" colab_type="code" outputId="7a37cfd2-2619-4792-f35e-2af4e5af0681" executionInfo={"status": "ok", "timestamp": 1579903246072, "user_tz": 480, "elapsed": 49577, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAYx167H2vNmFSKlsQkQY-bjbJ-3sPGymaG0kXO=s64", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 34} # Always make sure you are using running the expected version. # There are considerable differences between versions. # This Colab was tested with 2.1.0. 
import tensorflow as tf tf.__version__ # + colab_type="code" id="EBkp94O9GeFt" colab={} import base64, collections, io, itertools, functools, json, os, random, re, textwrap, time, urllib, xml import numpy as np import pandas as pd from matplotlib import pyplot as plt from PIL import Image, ImageDraw from IPython import display # + [markdown] colab_type="text" id="eY81Xe9CGeFz" # ## Get the data # # In this section we download a set of raw data files from the web. # + colab_type="code" id="ujcwY2WRGeFz" outputId="a8c63d70-29d8-4ec7-88ac-b9d0ee02386b" executionInfo={"status": "ok", "timestamp": 1579903246074, "user_tz": 480, "elapsed": 49568, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAYx167H2vNmFSKlsQkQY-bjbJ-3sPGymaG0kXO=s64", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 550} # Retrieve list of categories. def list_bucket(bucket, regexp='.*'): """Returns a filtered list of Keys in specified GCS bucket.""" keys = [] fh = urllib.request.urlopen('https://storage.googleapis.com/%s' % bucket) content = xml.dom.minidom.parseString(fh.read()) for e in content.getElementsByTagName('Contents'): key = e.getElementsByTagName('Key')[0].firstChild.data if re.match(regexp, key): keys.append(key) return keys all_ndjsons = list_bucket('quickdraw_dataset', '.*ndjson$') print('available: (%d)' % len(all_ndjsons)) print('\n'.join(textwrap.wrap( '|'.join([key.split('/')[-1].split('.')[0] for key in all_ndjsons]), width=100))) # + colab_type="code" id="MjaUHJ7zGeF3" colab={} # Mini group of two animals. pets = ['cat', 'dog'] # Somewhat larger group of zoo animals. zoo = ['camel', 'crocodile', 'dolphin', 'elephant', 'flamingo', 'giraffe', 'kangaroo', 'lion', 'monkey', 'penguin', 'rhinoceros'] # Even larger group of all animals. animals = ['ant', 'bat', 'bear', 'bee', 'bird', 'butterfly', 'camel', 'cat', 'cow', 'crab', 'crocodile', 'dog', 'dolphin', 'dragon', 'duck', 'elephant', 'fish', 'flamingo', 'frog', 'giraffe', 'hedgehog', 'horse', 'kangaroo', 'lion', 'lobster', 'monkey', 'mosquito', 'mouse', 'octopus', 'owl', 'panda', 'parrot', 'penguin', 'pig', 'rabbit', 'raccoon', 'rhinoceros', 'scorpion', 'sea turtle', 'shark', 'sheep', 'snail', 'snake', 'spider', 'squirrel', 'swan'] # + [markdown] id="6Df5Br4Pyb9b" colab_type="text" # Create your own group -- the more categories you include the more challenging the classification task will be... # + id="V-wVsOV4yZlA" colab_type="code" colab={} # YOUR ACTION REQUIRED: # Choose one of above groups for remainder of workshop. # Note: This will result in ~100MB of download per class. # `dataset_name` will be used to construct directories containing the data. labels, dataset_name = zoo, 'zoo' # + colab_type="code" id="tmzXx5skGeF5" outputId="7fa595c6-f342-4baa-9f47-9bb176afd02b" executionInfo={"status": "ok", "timestamp": 1579903268257, "user_tz": 480, "elapsed": 71736, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAYx167H2vNmFSKlsQkQY-bjbJ-3sPGymaG0kXO=s64", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 674} # Download above chosen group. 
def valid_ndjson(filename): """Checks presence + completeness of .ndjson file.""" try: json.loads(tf.io.gfile.GFile(filename).readlines()[-1]) return True except (ValueError, IOError): return False def retrieve(bucket, key, filename): """Returns a file specified by its Key from a GCS bucket.""" url = 'https://storage.googleapis.com/%s/%s' % ( bucket, urllib.parse.quote(key)) print('\n' + url) if not tf.io.gfile.exists(filename): with tf.io.gfile.GFile(filename, 'w') as f: f.write(urllib.request.urlopen(url).read()) while not valid_ndjson(filename): print('*** Corrupted download (%.2f MB), retrying...' % ( os.path.getsize(filename) / 2.**20)) with tf.io.gfile.GFile(filename, 'w') as f: f.write(urllib.request.urlopen(url).read()) tf.io.gfile.makedirs(data_path) print('\n%d labels:' % len(labels)) for name in labels: print(name, end=' ') dst = '%s/%s.ndjson' % (data_path, name) retrieve('quickdraw_dataset', 'full/simplified/%s.ndjson' % name, dst) print('%.2f MB' % (tf.io.gfile.stat(dst).length / 2.**20)) print('\nDONE :)') # + [markdown] colab_type="text" id="ZO6tp_h-GeF8" # ## Inspect the data # # Let's find out what the format of the downloaded files is. # # First, we are going to enumerate them. # + colab_type="code" id="Xb_2LxbMGeF_" outputId="14df88ad-b83d-4a47-cbeb-88a81262e075" executionInfo={"status": "ok", "timestamp": 1579903272653, "user_tz": 480, "elapsed": 76124, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAYx167H2vNmFSKlsQkQY-bjbJ-3sPGymaG0kXO=s64", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 265} print('\n'.join([ '%6.1fM : %s' % (tf.io.gfile.stat(path).length/1024**2, path) for path in tf.io.gfile.glob('{}/*.ndjson'.format(data_path)) ])) # + [markdown] id="HBz4Bn90yyWX" colab_type="text" # Let's further explore what the `NDJSON` file format is. # + colab_type="code" id="UCL46qhKGeGC" outputId="9723a47c-f61e-492b-b1d3-21859317adc6" executionInfo={"status": "ok", "timestamp": 1579903272654, "user_tz": 480, "elapsed": 76117, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAYx167H2vNmFSKlsQkQY-bjbJ-3sPGymaG0kXO=s64", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 90} path = sorted(tf.io.gfile.glob(os.path.join(data_path, '*.ndjson')))[0] print(path) print(tf.io.gfile.GFile(path).read()[:1000] + '...') # + [markdown] id="tAVekUf8y7dx" colab_type="text" # As we can see, it's a format that contains one JSON dictionary per line. # # Let's parse one single line. # + colab_type="code" id="f6m9rZzjGeGG" outputId="ce19d037-73b0-4dd4-a607-0d6abd3247d5" executionInfo={"status": "ok", "timestamp": 1579903272655, "user_tz": 480, "elapsed": 76111, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAYx167H2vNmFSKlsQkQY-bjbJ-3sPGymaG0kXO=s64", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 34} data_json = json.loads(tf.io.gfile.GFile(path).readline()) data_json.keys() # + colab_type="code" id="XKyZ2P4KGeGJ" outputId="cc73541d-5c07-477d-c0ed-91d9b26cb42e" executionInfo={"status": "ok", "timestamp": 1579903272656, "user_tz": 480, "elapsed": 76105, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAYx167H2vNmFSKlsQkQY-bjbJ-3sPGymaG0kXO=s64", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 105} # So we have some meta information. 
for k, v in data_json.items(): if k != 'drawing': print('%20s -> %s' % (k, v)) # + colab_type="code" id="S8uCmlQ_GeGN" outputId="a8dda433-3b6d-4a47-d600-a5dffc503872" executionInfo={"status": "ok", "timestamp": 1579903272656, "user_tz": 480, "elapsed": 76098, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAYx167H2vNmFSKlsQkQY-bjbJ-3sPGymaG0kXO=s64", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 52} # Extract the actual drawing. drawing = data_json['drawing'] # The drawing consists of a series of strokes: print('Shapes:', [np.array(stroke).shape for stroke in drawing]) print('Example stroke:', drawing[0]) # + colab_type="code" id="bb7XhpB4GeGQ" outputId="bb311de2-cd05-4a0f-e910-809f6a3d649c" executionInfo={"status": "ok", "timestamp": 1579903273057, "user_tz": 480, "elapsed": 76490, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAYx167H2vNmFSKlsQkQY-bjbJ-3sPGymaG0kXO=s64", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 264} # Draw the image -- the strokes all have have shape (2, n) # so the first index seems to be x/y coordinate: for stroke in drawing: # Each array has X coordinates at [0, :] and Y coordinates at [1, :]. plt.plot(np.array(stroke[0]), -np.array(stroke[1])) # Would YOU recognize this drawing successfully? # + colab_type="code" id="1wwxryjLGeGU" outputId="e8b637e6-9273-43e9-a816-26ba583d1997" executionInfo={"status": "ok", "timestamp": 1579903273058, "user_tz": 480, "elapsed": 76484, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAYx167H2vNmFSKlsQkQY-bjbJ-3sPGymaG0kXO=s64", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 34} # Some more code to load many sketches at once. # Let's ignore the difficult `unrecognized` sketches for now # (i.e. unrecognized by the official quickdraw classifier). def convert(line): """Converts single JSON line and converts 'drawing' to list of np.array.""" d = json.loads(line) d['drawing'] = [np.array(stroke) for stroke in d['drawing']] return d def loaditer(name, unrecognized=False): """Returns iterable of drawings in specified file. Args: name: Name of the downloaded object (e.g. "elephant"). unrecognized: Whether to include drawings that were not recognized by Google AI (i.e. the hard ones). """ for line in tf.io.gfile.GFile('%s/%s.ndjson' % (data_path, name)): d = convert(line) if d['recognized'] or unrecognized: yield d def loadn(name, n, unrecognized=False): """Returns list of drawings. Args: name: Name of the downloaded object (e.g. "elephant"). n: Number of drawings to load. unrecognized: Whether to include drawings that were not recognized by Google AI (i.e. the hard ones). """ it = loaditer(name, unrecognized=unrecognized) return list(itertools.islice(it, 0, n)) n = 100 print('Loading {} instances of "{}"...'.format(n, labels[0]), end='') sample = loadn(labels[0], 100) print('done.') # + colab_type="code" id="-jzTKBt5GeGY" outputId="5a046047-ce24-4eb4-dca9-572e1f424fd4" executionInfo={"status": "ok", "timestamp": 1579903274729, "user_tz": 480, "elapsed": 78148, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAYx167H2vNmFSKlsQkQY-bjbJ-3sPGymaG0kXO=s64", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 536} # Some more drawings. 
rows, cols = 3, 3 plt.figure(figsize=(3*cols, 3*rows)) for y in range(rows): for x in range(cols): i = y * cols + x plt.subplot(rows, cols, i + 1) for stroke in sample[i]['drawing']: plt.plot(np.array(stroke[0]), -np.array(stroke[1])) # + [markdown] colab_type="text" id="DF93HB1aGeGb" # ## Rasterize # # Idea: After converting the raw drawing data into rasterized images, we can # use [MNIST](https://www.tensorflow.org/tutorials/quickstart/beginner)-like # image processing to classify the drawings. # + colab_type="code" id="AVsC-4hcGeGc" colab={} def dict_to_img(drawing, img_sz=64, lw=3, maximize=True): """Converts QuickDraw data to quadratic rasterized image. Args: drawing: Dictionary instance of QuickDraw dataset. img_sz: Size output image (in pixels). lw: Line width (in pixels). maximize: Whether to maximize drawing within image pixels. Returns: A PIL.Image with the rasterized drawing. """ img = Image.new('L', (img_sz, img_sz)) draw = ImageDraw.Draw(img) lines = np.array([ stroke[0:2, i:i+2] for stroke in drawing['drawing'] for i in range(stroke.shape[1] - 1) ], dtype=np.float32) if maximize: for i in range(2): min_, max_ = lines[:,i,:].min() * 0.95, lines[:,i,:].max() * 1.05 lines[:,i,:] = (lines[:,i,:] - min_) / max(max_ - min_, 1) else: lines /= 1024 for line in lines: draw.line(tuple(line.T.reshape((-1,)) * img_sz), fill='white', width=lw) return img # + colab_type="code" id="K4GzMB12GeGf" outputId="0462e537-6293-4ecf-c2bb-8fe508a9dd39" executionInfo={"status": "ok", "timestamp": 1579903274730, "user_tz": 480, "elapsed": 78138, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/<KEY>", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 742} # Show some examples. def showimg(img): """Shows an image with an inline HTML <img> tag. Args: img: Can be a PIL.Image or a numpy.ndarray. """ if isinstance(img, np.ndarray): img = Image.fromarray(img, 'L') b = io.BytesIO() img.convert('RGB').save(b, format='png') enc = base64.b64encode(b.getvalue()).decode('utf-8') display.display(display.HTML( '<img src="data:image/png;base64,%s">' % enc)) # Fetch some images + shuffle order. rows, cols = len(labels), 10 n_per_class = rows * cols // len(labels) + 1 drawings_list = [drawing for name in labels for drawing in loadn(name, cols)] # Create mosaic of rendered images. lw = 4 img_sz = 64 tableau = np.zeros((img_sz * rows, img_sz * cols), dtype=np.uint8) for y in range(rows): for x in range(cols): i = y * cols + x img = dict_to_img(drawings_list[i], img_sz=img_sz, lw=lw, maximize=True) tableau[y*img_sz:(y+1)*img_sz, x*img_sz:(x+1)*img_sz] = np.asarray(img) showimg(tableau) print('{} samples of : {}'.format(cols, ' '.join(labels))) # + [markdown] colab_type="text" id="gW40he1tGeGi" # ## Protobufs and tf.train.Example # # Tensorflow's "native" format for data storage is the `tf.train.Example` # [protocol buffer](https://en.wikipedia.org/wiki/Protocol_Buffers). # # In this section we briefly explore the API needed to access the data # inside the `tf.train.Example` protocol buffer. It's **not necessary** to read # through the # [Protocol Buffer Basics: Python - documentation](https://developers.google.com/protocol-buffers/docs/pythontutorial). 
# + colab_type="code" id="BB8g-Tb1GeGn" outputId="77288829-bda7-4dde-eeaa-202937481d09" executionInfo={"status": "ok", "timestamp": 1579903274730, "user_tz": 480, "elapsed": 78130, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAYx167H2vNmFSKlsQkQY-bjbJ-3sPGymaG0kXO=s64", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 443} # Create a new (empty) instance. example = tf.train.Example() # An empty example will not print anything. print(example) # An example contains a map from feature name to "Feature". # Every "Feature" contains a list of elements of the same # type, which is one of: # - bytes_list (similar to Python's "str") # - float_list (float number) # - int64_list (integer number) # These values can be accessed as follows (no need to understand # details): # Add float value "3.1416" to feature "magic_numbers" example.features.feature['magic_numbers'].float_list.value.append(3.1416) # Add some more values to the float list "magic_numbers". example.features.feature['magic_numbers'].float_list.value.extend([2.7183, 1.4142, 1.6180]) ### YOUR ACTION REQUIRED: # Create a second feature named "adversaries" and add the elements # b'Alice' and b'Bob'. example.features.feature['adversaries']. # This will now print a serialized representation of our protocol buffer # with features "magic_numbers" and "adversaries" set... print(example) # .. et voila : that's all you need to know about protocol buffers for this # workshop. # + [markdown] colab_type="text" id="33imDs6YGeGq" # ## Create datasets # # Now let's create a "dataset" of `tf.train.Example` # [protocol buffers](https://developers.google.com/protocol-buffers/) ("protos"). # # A single example will contain all the information we want to use for training for a drawing (i.e. rasterized # image, label, and maybe other information). # + colab_type="code" id="Ef_i5QFWGeGq" outputId="dd2a396b-d604-4c37-8c10-8a071a01e7b0" executionInfo={"status": "ok", "timestamp": 1579903365138, "user_tz": 480, "elapsed": 168530, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAYx167H2vNmFSKlsQkQY-bjbJ-3sPGymaG0kXO=s64", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 212} # Let's first check how many [recognized=True] examples we have in each class. for name in labels: num_all_samples = len(list(tf.io.gfile.GFile('%s/%s.ndjson' % (data_path, name)))) num_recognized_samples = len(list(loaditer(name))) print(name, num_all_samples, 'recognized', num_recognized_samples) # + [markdown] id="146TnInkYMhN" colab_type="text" # **Sharding** # # A dataset consists of non-overlapping sets of examples that will be used for # training and evaluation of the classifier (the "test" set will be used for the # final evaluation). As these files can quickly become very large, we split them into smaller files referred to as *shards*. # For example, we could split a single dataset into a number of *shards*, like # * train-00000-of-00005, # * train-00001-of-00005, # * ..., # * train-00004-of-00005 (if we're using 5 shards). # # This way we have smaller individual files, and we can also easily access for example only 20% of all data, or have 5 threads which read through all the data # simultaneously. # # Generally, with large datasets, a recommendation is to split data into individual *shards* with a size of ~100 MB each. This workshop might use smaller sharding sizes for simplicity reasons. 
# + colab_type="code" id="r4h9W4JvGeGt" cellView="both" colab={} #@title `make_sharded_files()` code #@markdown Helper code to create sharded recordio files. #@markdown Simply **click "execute"** and continue to the next cell. #@markdown No need to read through this code to understand the remainder of the Colab. #@markdown #@markdown If you want to have a look anyways, you can double-click this cell or click on the three dots #@markdown and then select "Form" and then "Show Code" (shortcut `<Ctrl-M> <F>`). # Helper code to create sharded recordio files. # (No need to read through this.) # The code in this cell simply takes a list of iterators and then # randomly distributes the values returned by these iterators into sharded # datasets (e.g. a train/eval/test split). def rand_key(counts): """Returns a random key from "counts", using values as distribution.""" r = random.randint(0, sum(counts.values())) for key, count in counts.items(): if r > count or count == 0: r -= count else: counts[key] -= 1 return key def get_split(i, splits): """Returns key from "splits" for iteration "i".""" i %= sum(splits.values()) for split in sorted(splits): if i < splits[split]: return split i -= splits[split] def make_counts(labels, total): """Generates counts for "labels" totaling "total".""" counts = {} for i, name in enumerate(labels): counts[name] = total // (len(labels) - i) total -= counts[name] return counts def example_to_dict(example): """Converts a tf.train.Example to a dictionary.""" example_dict = {} for name, value in example.features.feature.items(): if value.HasField('bytes_list'): value = value.bytes_list.value elif value.HasField('int64_list'): value = value.int64_list.value elif value.HasField('float_list'): value = value.float_list.value else: raise 'Unknown *_list type!' if len(value) == 1: example_dict[name] = value[0] else: example_dict[name] = np.array(value) return example_dict def make_sharded_files(make_example, path, labels, iters, counts, splits, shards=10, overwrite=False, report_dt=10, make_df=False): """Create sharded dataset from "iters". Args: make_example: Converts object returned by elements of "iters" to tf.train.Example() proto. path: Directory that will contain recordio files. labels: Names of labels, will be written to "labels.txt". iters: List of iterables returning drawing objects. counts: Dictionary mapping class to number of examples. splits: Dictionary mapping filename to multiple examples. For example, splits=dict(a=2, b=1) will result in two examples being written to "a" for every example being written to "b". shards: Number of files to be created per split. overwrite: Whether a pre-existing directory should be overwritten. report_dt: Number of seconds between status updates (0=no updates). make_df: Also write data as pandas.DataFrame - do NOT use this with very large datasets that don't fit in memory! Returns: Total number of examples written to disk per split. """ assert len(iters) == len(labels) # Prepare output. if not tf.io.gfile.exists(path): tf.io.gfile.makedirs(path) paths = { split: ['%s/%s-%05d-of-%05d' % (path, split, i, shards) for i in range(shards)] for split in splits } assert overwrite or not tf.io.gfile.exists(paths.values()[0][0]) writers = { split: [tf.io.TFRecordWriter(ps[i]) for i in range(shards)] for split, ps in paths.items() } t0 = time.time() examples_per_split = collections.defaultdict(int) i, n = 0, sum(counts.values()) counts = dict(**counts) rows = [] # Create examples. 
while sum(counts.values()): name = rand_key(counts) split = get_split(i, splits) writer = writers[split][examples_per_split[split] % shards] label = labels.index(name) example = make_example(label, next(iters[label])) writer.write(example.SerializeToString()) if make_df: example.features.feature['split'].bytes_list.value.append(split.encode('utf8')) rows.append(example_to_dict(example)) examples_per_split[split] += 1 i += 1 if report_dt > 0 and time.time() - t0 > report_dt: print('processed %d/%d (%.2f%%)' % (i, n, 100. * i / n)) t0 = time.time() # Store results. for split in splits: for writer in writers[split]: writer.close() with tf.io.gfile.GFile('%s/labels.txt' % path, 'w') as f: f.write('\n'.join(labels)) with tf.io.gfile.GFile('%s/counts.json' % path, 'w') as f: json.dump(examples_per_split, f) if make_df: df_path = '%s/dataframe.pkl' % path print('Writing %s...' % df_path) pd.DataFrame(rows).to_pickle(df_path) return dict(**examples_per_split) # + [markdown] id="Ecj5-3EGKXOd" colab_type="text" # ### Create IMG dataset # + colab_type="code" id="uPF9vIipGeGv" colab={} # Uses dict_to_img() from previous cell to create raster image. def make_example_img(label, drawing): """Converts QuickDraw dictionary to example with rasterized data. Args: label: Numerical representation of the label (e.g. '0' for labels[0]). drawing: Dictionary with QuickDraw data. Returns: A tf.train.Example protocol buffer (with 'label', 'img_64', and additional metadata features). """ example = tf.train.Example() example.features.feature['label'].int64_list.value.append(label) img_64 = np.asarray(dict_to_img( drawing, img_sz=64, lw=4, maximize=True)).reshape(-1) example.features.feature['img_64'].int64_list.value.extend(img_64) example.features.feature['countrycode'].bytes_list.value.append( drawing['countrycode'].encode()) example.features.feature['recognized'].int64_list.value.append( drawing['recognized']) example.features.feature['word'].bytes_list.value.append( drawing['word'].encode()) ts = drawing['timestamp'] ts = time.mktime(time.strptime(ts[:ts.index('.')], '%Y-%m-%d %H:%M:%S')) example.features.feature['timestamp'].int64_list.value.append(int(ts)) example.features.feature['key_id'].int64_list.value.append( int(drawing['key_id'])) return example # + [markdown] id="VhCUj9uGiPov" colab_type="text" # We will now create a dataset with 80k samples consisting of: # # * 50k samples used for training # * 20k samples used for evaluation # * 10k samples used for testing # # The generation below will take about **~6 minutes**. # # > **Note:** Larger datasets take longer to generate and to train on, but also lead to better classification results. # # # + colab_type="code" id="ksR9Hw0DGeGy" outputId="6813a6fd-a2a3-41b6-fcfb-1fd6509b2e8f" executionInfo={"status": "ok", "timestamp": 1579903779467, "user_tz": 480, "elapsed": 299570, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAYx167H2vNmFSKlsQkQY-bjbJ-3sPGymaG0kXO=s64", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 461} # Create the (rasterized) dataset. path = '%s/%s_img' % (data_path, dataset_name) t0 = time.time() examples_per_split = make_sharded_files( make_example=make_example_img, path=path, labels=labels, iters=[loaditer(name) for name in labels], # Creating 50k train, 20k eval and 10k test examples. counts=make_counts(labels, 80000), splits=dict(train=5, eval=2, test=1), overwrite=True, # Note: Set this to False when generating large datasets. 
make_df=True, ) # If you don't see the final output below, it's probably because your VM # has run out of memory and crashed! # This can happen when make_df=True. print('stored data to "%s"' % path) print('generated %s examples in %d seconds' % ( examples_per_split, time.time() - t0)) # + [markdown] colab_type="text" id="s-MmsFVFGeG1" # ### Create STROKE dataset # # This section creates another dataset of example protos that contain the raw # stroke data, suitable for usage with a recurrent neural network. # + colab_type="code" id="c1SCR8h1GeG2" colab={} # Convert stroke coordinates into normalized relative coordinates, # one single list, and add a "third dimension" that indicates when # a new stroke starts. def dict_to_stroke(d): norm = lambda x: (x - x.min()) / max(1, (x.max() - x.min())) xy = np.concatenate([np.array(s, dtype=np.float32) for s in d['drawing']], axis=1) z = np.zeros(xy.shape[1]) if len(d['drawing']) > 1: z[np.cumsum(np.array(list(map(lambda x: x.shape[1], d['drawing'][:-1]))))] = 1 dxy = np.diff(norm(xy)) return np.concatenate([dxy, z.reshape((1, -1))[:, 1:]]) # + colab_type="code" id="QC1U8kZ8GeG4" outputId="e1e59afd-d5c0-401b-e3df-f5332e11f56b" executionInfo={"status": "ok", "timestamp": 1579903779774, "user_tz": 480, "elapsed": 299849, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAYx167H2vNmFSKlsQkQY-bjbJ-3sPGymaG0kXO=s64", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 264} # Visualize and control output of dict_to_stroke(). stroke = dict_to_stroke(sample[0]) # The first 2 dimensions are normalized dx/dy coordinates, and # the third dimension indicates a new stroke. xy = stroke[:2, :].cumsum(axis=1) plt.plot(xy[0,:], -xy[1,:]) pxy = xy[:, stroke[2] != 0] # Indicate the new stroke with a red circle. plt.plot(pxy[0], -pxy[1], 'ro'); # + colab_type="code" id="rxzZZyR9GeG8" colab={} # Uses dict_to_stroke() from previous cell to create raster image. def make_example_stroke(label, drawing): """Converts QuickDraw dictionary to example with stroke data. Args: label: Numerical representation of the label (e.g. '0' for labels[0]). drawing: Dictionary with QuickDraw data. Returns: A tf.train.Example protocol buffer (with 'label', 'stroke_x', 'stroke_y', 'stroke_z', and additional metadata features). 
""" example = tf.train.Example() example.features.feature['label'].int64_list.value.append(label) stroke = dict_to_stroke(drawing) example.features.feature['stroke_x'].float_list.value.extend(stroke[0, :]) example.features.feature['stroke_y'].float_list.value.extend(stroke[1, :]) example.features.feature['stroke_z'].float_list.value.extend(stroke[2, :]) example.features.feature['stroke_len'].int64_list.value.append( stroke.shape[1]) example.features.feature['countrycode'].bytes_list.value.append( drawing['countrycode'].encode()) example.features.feature['recognized'].int64_list.value.append( drawing['recognized']) example.features.feature['word'].bytes_list.value.append( drawing['word'].encode()) ts = drawing['timestamp'] ts = time.mktime(time.strptime(ts[:ts.index('.')], '%Y-%m-%d %H:%M:%S')) example.features.feature['timestamp'].int64_list.value.append(int(ts)) example.features.feature['key_id'].int64_list.value.append( int(drawing['key_id'])) return example # + id="A7LyQXCv3ADO" colab_type="code" outputId="76dc5d47-5874-41d4-e3ef-895a17f95712" executionInfo={"status": "ok", "timestamp": 1579903846019, "user_tz": 480, "elapsed": 366071, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAYx167H2vNmFSKlsQkQY-bjbJ-3sPGymaG0kXO=s64", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 158} path = '%s/%s_stroke' % (data_path, dataset_name) t0 = time.time() examples_per_split = make_sharded_files( make_example=make_example_stroke, path=path, labels=labels, iters=[loaditer(name) for name in labels], # Creating 50k train, 20k eval, 10k test examples. Takes ~2min counts=make_counts(labels, 80000), splits=dict(train=5, eval=2, test=1), overwrite=True, # Note : Set this to False when generating large datasets... make_df=True, ) print('stored data to "%s"' % path) print('generated %s examples in %d seconds' % (examples_per_split, time.time() - t0)) # + [markdown] id="Kqkg17PxIhrJ" colab_type="text" # # ----- Optional part ----- # + [markdown] id="V1oCjDZhb2do" colab_type="text" # ## Inspect data # + id="8aUJmZuUb5JH" colab_type="code" outputId="03846b0c-1fe8-4c20-de0f-7852112282ee" executionInfo={"status": "ok", "timestamp": 1579903847627, "user_tz": 480, "elapsed": 367661, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAYx167H2vNmFSKlsQkQY-bjbJ-3sPGymaG0kXO=s64", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 1000} # YOUR ACTION REQUIRED: # Check out the files generated in $data_path # Note that you can also inspect the files in http://drive.google.com if you # used Drive as the destination. # + id="jq3yJWKMcQdV" colab_type="code" outputId="3e2c92d0-ead5-4693-e09f-9f69070cb5c6" executionInfo={"status": "ok", "timestamp": 1579903848495, "user_tz": 480, "elapsed": 368492, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAYx167H2vNmFSKlsQkQY-bjbJ-3sPGymaG0kXO=s64", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 54} # Let's look at a single file of the sharded dataset. tf_record_path = '{}/{}_img/eval-00000-of-00010'.format(data_path, dataset_name) # YOUR ACTION REQUIRED: # Use tf.data.TFRecordDataset() to read a single record from the file and assign # it to the variable `record`. # What data type has this record? #record = ... 
#record # + id="KvAUIAn6d5ow" colab_type="code" outputId="3676bf6a-ba7b-408f-d722-7f04110c81da" executionInfo={"status": "ok", "timestamp": 1579903848496, "user_tz": 480, "elapsed": 368461, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAYx167H2vNmFSKlsQkQY-bjbJ-3sPGymaG0kXO=s64", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 34} # Check out the features. They should correspond to what we generated in # make_example_img() above. example = tf.train.Example() # Note: .numpy() returns the underlying string from the Tensor. example.ParseFromString(record.numpy()) print(list(example.features.feature.keys())) # + id="BDGmgNQ7dgOz" colab_type="code" colab={} # YOUR ACTION REQUIRED: # Extract the label and the image data from the example protobuf. # Use above section "tf.train.Example" for reference. label_int = img_64 = # + id="3e0Dwc09efDD" colab_type="code" outputId="3fefac4a-eb14-4658-eab1-bebf826ecbd5" executionInfo={"status": "ok", "timestamp": 1579903848497, "user_tz": 480, "elapsed": 368435, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAYx167H2vNmFSKlsQkQY-bjbJ-3sPGymaG0kXO=s64", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 310} # Visualize the image: print(labels[label_int]) plt.matshow(np.array(img_64).reshape((64, 64))) # + id="YhfIKXXChrab" colab_type="code" outputId="0b45a295-d459-4427-f2f3-6ccf1d018a6b" executionInfo={"status": "ok", "timestamp": 1579903849648, "user_tz": 480, "elapsed": 369575, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAYx167H2vNmFSKlsQkQY-bjbJ-3sPGymaG0kXO=s64", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 353} # YOUR ACTION REQUIRED: # Check that we have an equal distribution of labels in the training files. # + [markdown] id="lI01xshyjTbH" colab_type="text" # ## More on protobufs # + id="aGej8L70W7AH" colab_type="code" outputId="40ed9b94-1b7b-4634-f3b0-bfd64714005a" executionInfo={"status": "ok", "timestamp": 1579903862951, "user_tz": 480, "elapsed": 382847, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAYx167H2vNmFSKlsQkQY-bjbJ-3sPGymaG0kXO=s64", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 1000} # If we want to create our own protocol buffers, we first need to install # some programs. # !apt-get -y install protobuf-compiler python-pil python-lxml # + id="lkF7qlk3XwA2" colab_type="code" colab={} # Step 1: Write a proto file that describes our data format. # YOUR ACTION REQUIRED: Complete the definition of the "Person" message (you # can use the slide for inspiration). with open('person.proto', 'w') as f: f.write('''syntax = "proto3";''') # + id="pBAJaBXBXwrH" colab_type="code" outputId="e70c9c7d-5a56-46ee-a701-4daf534a51a6" executionInfo={"status": "ok", "timestamp": 1579903866381, "user_tz": 480, "elapsed": 386239, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAYx167H2vNmFSKlsQkQY-bjbJ-3sPGymaG0kXO=s64", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 105} # Step 2: Compile proto definition to a Python file. # !protoc --python_out=. person.proto # !ls -lh # + id="2T9fzSImX_M1" colab_type="code" colab={} # Step 3: Import code from generated Python file. 
from person_pb2 import Person # + id="gvOO-6qxX_8E" colab_type="code" outputId="69c76c4b-c12a-4d00-8138-1c542d44565e" executionInfo={"status": "ok", "timestamp": 1579903866383, "user_tz": 480, "elapsed": 386220, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAYx167H2vNmFSKlsQkQY-bjbJ-3sPGymaG0kXO=s64", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 34} person = Person() person.name = '<NAME>' person.email = '<EMAIL>' person.lucky_numbers.extend([13, 99]) person.SerializeToString() # + id="jyrH9BxLZsrf" colab_type="code" outputId="2a401029-0154-4ad8-e3ed-1cd8eb660d1d" executionInfo={"status": "ok", "timestamp": 1579903866383, "user_tz": 480, "elapsed": 386207, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAYx167H2vNmFSKlsQkQY-bjbJ-3sPGymaG0kXO=s64", "userId": "08860260976100898876"}} colab={"base_uri": "https://localhost:8080/", "height": 52} # YOUR ACTION REQUIRED: # Compare the size of the serialized person structure in proto format # vs. JSON encoded (you can use Python's json.dumps() and list members # manually, or import google.protobuf.json_format). # Which format is more efficient? Why? # Which format is easier to use? # Which format is more versatile?
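# One possible way to approach the size comparison asked about above (a sketch, not the official solution): protobuf's json_format module can render the same message as JSON so the two encodings can be measured side by side. This assumes the Person message was completed with the name/email/lucky_numbers fields used above.

# +
from google.protobuf import json_format

proto_bytes = person.SerializeToString()
json_text = json_format.MessageToJson(person)

print('proto size:', len(proto_bytes), 'bytes')
print('json size :', len(json_text.encode('utf-8')), 'bytes')
# The binary encoding is usually smaller and faster to parse, while JSON is
# human-readable and needs no generated classes, at the cost of size and of
# having no enforced schema.
# -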
extras/amld/notebooks/exercises/1_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:cattrainer] # language: python # name: conda-env-cattrainer-py # --- # # Processing MedCATTrainer Annotations # A short notebook to demonstrate the MedCATTrainer downloaded annotations schema. Exports with and without text have the same format, except for the source text. import pandas as pd import json # Load the downloaded annotations - as described: https://github.com/CogStack/MedCATtrainer/blob/master/README.md#download-annos projs = json.load(open('example_data/MedCAT_Export_With_Text_2020-05-22_10_34_09.json'))['projects'] # Number of annotation projects downloaded print(f'Projects annotated:{len(projs)}') # select first project proj = projs[0] # project level cui / tui filters are top level dict keys proj.keys() # + # Annotations are found inside each document. print(f'# of Documents: {len(proj["documents"])}') print(f'# of Annotations: {sum([len(d["annotations"]) for d in proj["documents"]])}') # Annotations that have been marked by a human annotator print(f'# Validated Annotations: {len([a for d in proj["documents"] for a in d["annotations"] if a["validated"] == True])}') # Annotations that have been marked correct - (blue) print(f'# Correct Annotations: {len([a for d in proj["documents"] for a in d["annotations"] if a["correct"] == True])}') # Annotations that have been marked incorrect - (red) print(f'# Incorrect Annotations: {len([a for d in proj["documents"] for a in d["annotations"] if a["deleted"] == True])}') # Annotations that have been marked terminated - (dark red) print(f'# Terminated Annotations: {len([a for d in proj["documents"] for a in d["annotations"] if a["killed"] == True])}') # Annotations that have been marked alternative - (turquoise) print(f'# Alternative Annotations: {len([a for d in proj["documents"] for a in d["annotations"] if a["alternative"] == True])}') # Annotations that have been manually created via right-click - 'Add Annotation', these will also be 'correct' == True print(f'# Manually Created Annotations: {len([a for d in proj["documents"] for a in d["annotations"] if a["manually_created"] == True])}') # - # ### Meta Annotations # Each Meta Annotation will have the names of the task and associated values you've previously selected. # In this case we have: 'Negation' and 'Skip' # + ## Correct annotations filtered by their meta annotations - here Negation - No and Skip - Yes # - proj['documents'][1]['annotations'][2]['meta_anns'] annos = [] for doc in proj['documents']: for a in doc['annotations']: meta_anns = a['meta_anns'] if a['correct'] == True and len(meta_anns) != 0: # meta_anns maps each task name to its meta annotation; key order is not necessarily consistent negation = meta_anns['Negation'] skip = meta_anns['Skip'] if negation['value'] == 'No' and skip['value'] == 'Yes': # pull out the doc_name, the text span value, and the concept annos.append({'doc_name': doc['name'], 'anno_value': a['value'], 'cui': a['cui']}) # make DataFrame df = pd.DataFrame(annos) df.head(5) # ### Comparing a Second (or More) Set of Annotations # Often we'll dual-annotate projects and compute metrics to develop a gold standard. # - We'll compute metrics such as [Inter Annotator Agreement (IIA)](https://en.wikipedia.org/wiki/Inter-rater_reliability) and [Cohen's Kappa](https://en.wikipedia.org/wiki/Cohen%27s_kappa).
# - Metrics can be output for each concept for the concept recognition + linking tasks. # - For tasks with only a handful of concept filters we can compute the meta annotation task agreement, but often we will not have enough annotations per concept for any meaningful comparison. Instead we can group all meta annotations together to compute scores. from sklearn.metrics import cohen_kappa_score proj['documents'][0]['annotations'][0] def anno_state(anno): if anno['deleted']: return 'del' if anno['alternative']: return 'alt' if anno['killed']: return 'kil' if anno['manually_created']: return 'man' return 'cor' # + # Concept Recognition + Linking Agreement per CUI across 2 projects # - # only take documents completed by both shared_docs = set([d['id'] for d in projs[0]['documents']]) & set([d['id'] for d in projs[1]['documents']]) projs[0]['documents'] = [d for d in projs[0]['documents'] if d['id'] in shared_docs] projs[1]['documents'] = [d for d in projs[1]['documents'] if d['id'] in shared_docs] # project 1 annos proj1_annos_cuis = {f'{d["id"]}:{a["start"]}': a['cui'] for d in projs[0]['documents'] for a in d['annotations']} proj1_annos_states = {f'{d["id"]}:{a["start"]}': anno_state(a) for d in projs[0]['documents'] for a in d['annotations']} # project 2 annos proj2_annos_cuis = {f'{d["id"]}:{a["start"]}': a['cui'] for d in projs[1]['documents'] for a in d['annotations']} proj2_annos_states = {f'{d["id"]}:{a["start"]}': anno_state(a) for d in projs[1]['documents'] for a in d['annotations']} all_cuis = set(proj1_annos_cuis.values()) | set(proj2_annos_cuis.values()) cui_ck = {} for cui in all_cuis: cui_tuples = [] p1 = {k:v for k,v in proj1_annos_cuis.items() if v == cui} p2 = {k:v for k,v in proj2_annos_cuis.items() if v == cui} for anno_key in set(p1.keys()) | set(p2.keys()): cui_tuples.append((proj1_annos_states.get(anno_key, 'na'), proj2_annos_states.get(anno_key, 'na'))) cui_ck[cui] = cui_tuples # ## IIA Per CUI iia_per_cui = {cui: (len([i for i in v if i[0] == i[1]]) / len(v)) * 100 for cui, v in cui_ck.items()} # ## Cohen's Kappa Per CUI # Note: for CUIs with only one label Cohen's kappa can be undefined (NaN) cohens_kappa_per_cui = {k: cohen_kappa_score([i[0] for i in v], [i[1] for i in v]) for k,v in cui_ck.items()} # ### Meta Annotation # - Group all annos together for each task and compute IIA, CK # project 1 meta annos proj1_meta_annos_neg = {f'{d["id"]}:{a["start"]}': a['meta_anns'].get('Negation', {'value': 'na'})['value'] for d in projs[0]['documents'] for a in d['annotations']} proj1_meta_annos_skip = {f'{d["id"]}:{a["start"]}': a['meta_anns'].get('Skip', {'value': 'na'})['value'] for d in projs[0]['documents'] for a in d['annotations']} # project 2 meta annos proj2_meta_annos_neg = {f'{d["id"]}:{a["start"]}': a['meta_anns'].get('Negation', {'value': 'na'})['value'] for d in projs[1]['documents'] for a in d['annotations']} proj2_meta_annos_skip = {f'{d["id"]}:{a["start"]}': a['meta_anns'].get('Skip', {'value': 'na'})['value'] for d in projs[1]['documents'] for a in d['annotations']} # remove 'na' examples; these would be incorrect or terminated examples that have no meta anno value.
def remove_na(meta_annos_dict): return {k:v for k,v in meta_annos_dict.items() if v != 'na'} proj1_meta_annos_neg = remove_na(proj1_meta_annos_neg) proj1_meta_annos_skip = remove_na(proj1_meta_annos_skip) proj2_meta_annos_neg = remove_na(proj2_meta_annos_neg) proj2_meta_annos_skip = remove_na(proj2_meta_annos_skip) # + # Take meta annos from each project and combine across projects: # - A stricter measure: default to 'na' if there is no corresponding meta anno in the 'other' project; to use this, swap '&' (intersection) for '|' (union). # - A fairer measure: remove the instances where there was no meta anno in the other project. We use this one below. neg_annos = [] for anno_key in set(proj1_meta_annos_neg.keys()) & set(proj2_meta_annos_neg.keys()): neg_annos.append((proj1_meta_annos_neg.get(anno_key, 'na'), proj2_meta_annos_neg.get(anno_key, 'na'))) skip_annos = [] for anno_key in set(proj1_meta_annos_skip.keys()) & set(proj2_meta_annos_skip.keys()): skip_annos.append((proj1_meta_annos_skip.get(anno_key, 'na'), proj2_meta_annos_skip.get(anno_key, 'na'))) # - iia_neg = (len([a for a in neg_annos if a[0] == a[1]]) / len(neg_annos)) * 100 print('iia neg:', iia_neg) iia_skip = (len([a for a in skip_annos if a[0] == a[1]]) / len(skip_annos)) * 100 print('iia skip:', iia_skip) ck_neg = cohen_kappa_score([v[0] for v in neg_annos], [v[1] for v in neg_annos]) print("cohen's kappa neg:", ck_neg) ck_skip = cohen_kappa_score([v[0] for v in skip_annos], [v[1] for v in skip_annos]) print("cohen's kappa skip:", ck_skip) # We get 'nan' here when only a single distinct value exists in the intersection, so Cohen's kappa is undefined. We can still report 100% IIA though!
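# An optional wrap-up sketch (an addition to the notebook): the per-CUI IIA and Cohen's kappa dictionaries computed above can be collected into a single DataFrame, which makes it easier to spot the concepts the two annotators disagree on most.

# +
agreement_df = (pd.DataFrame({'iia': pd.Series(iia_per_cui),
                              'cohens_kappa': pd.Series(cohens_kappa_per_cui),
                              'n_annotations': pd.Series({c: len(v) for c, v in cui_ck.items()})})
                .sort_values('iia'))
agreement_df.head(10)  # lowest-agreement concepts first
# -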
docs/Processing_Annotations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import matplotlib.pyplot as plt import math from lauztat.parameters import POI from lauztat.hypotests import Discovery from lauztat.calculators import AsymptoticCalculator from lauztat.config import Config import tensorflow as tf import zfit from zfit import ztf # ## Signal + background fit: # + bounds = (0.1, 3.0) # Data and signal np.random.seed(0) tau = -2.0 beta = -1/tau data = np.random.exponential(beta, 300) peak = np.random.normal(1.2, 0.1, 25) data = np.concatenate((data,peak)) data = data[(data > bounds[0]) & (data < bounds[1])] plt.hist(data, bins=100, histtype='step'); # - obs = zfit.Space('x', limits=bounds) mean = zfit.Parameter("mean", 1.2, 0.1, 2., floating=False) sigma = zfit.Parameter("sigma", 0.1, floating=False) lambda_ = zfit.Parameter("lambda",-2.0, -4.0, -1.0) Nsig = zfit.Parameter("Nsig", 20., -20., len(data)) Nbkg = zfit.Parameter("Nbkg", len(data), 0., len(data)*1.1) signal = Nsig * zfit.pdf.Gauss(obs=obs, mu=mean, sigma=sigma) background = Nbkg * zfit.pdf.Exponential(obs=obs, lambda_=lambda_) tot_model = signal + background # Create the negative log likelihood from zfit.core.loss import ExtendedUnbinnedNLL, UnbinnedNLL data_ = zfit.data.Data.from_numpy(obs=obs, array=data) nll = ExtendedUnbinnedNLL(model=[tot_model], data=[data_], fit_range=[obs]) # Load and instantiate a tensorflow minimizer from zfit.minimizers.minimizer_minuit import MinuitMinimizer minimizer = MinuitMinimizer() # Create the minimization graph to minimize mu and sigma and run it (minimize does it directly) minimum = minimizer.minimize(loss=nll) def plotfitresult(pdf, bounds, nbins, data): x = np.linspace(*bounds, num=1000) pdf = zfit.run(tot_model.pdf(x, norm_range=bounds)* tot_model.get_yield()) _ = plt.plot(x, ((bounds[1] - bounds[0])/nbins)*(pdf), "-r", label="fit result") nbins = 80 plt.hist(data, bins=nbins, histtype='step', range=bounds); plotfitresult(tot_model, bounds, nbins, data) plt.xlabel("m [GeV/c$^2$]") plt.ylabel("number of events") plt.savefig("fit_discovery_ex.png") # ## Discovery significance. # + def lossbuilder(model, data, weights=None): loss = ExtendedUnbinnedNLL(model=model, data=data, fit_range=[obs]) return loss config = Config(tot_model, data_, lossbuilder, MinuitMinimizer()) # - config.bestfit calc = AsymptoticCalculator(config) poinull = POI(Nsig, value=0) discovery_test = Discovery(poinull, calc) r = discovery_test.result(); r
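# A small illustrative footnote (an addition): a one-sided p-value maps to a Gaussian significance via the inverse survival function of the standard normal. The `p_value` below is a placeholder; substitute the null p-value reported by the discovery test above, whose exact return structure is not relied on here.

# +
from scipy.stats import norm

p_value = 2.87e-7  # placeholder: replace with the p-value printed by the test above
significance = norm.isf(p_value)  # inverse survival function of the standard normal
print(f"p = {p_value:.3g}  ->  Z = {significance:.2f} sigma")
# e.g. p = 2.87e-7 corresponds to the conventional 5-sigma discovery threshold
# -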
examples/notebooks/discovery_zfit_asy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <img src="images/strathsdr_banner.png" align="left"> # # Hardware Accelerated Spectrum Analysis on RFSoC # ---- # # <div class="alert alert-box alert-info"> # Please use Jupyter Labs http://board_ip_address/lab for this notebook. # </div> # # This notebook presents a flexible hardware accelerated Spectrum Analyzer Module for the Zynq UltraScale+ RFSoC. The Spectrum Analyzer Module was developed by the [University of Strathclyde](https://github.com/strath-sdr). # # ## Table of Contents # * [Introduction](#introduction) # * [Hardware Setup](#hardware-setup) # * [Software Setup](#software-setup) # * [Simple Tone Generation](#simple-tone-generation) # * [The Spectrum Analyzer](#the-spectrum-analyzer) # * [A Simple Example](#a-simple-example) # * [Conclusion](#conclusion) # # ## References # * [Xilinx, Inc, "USP RF Data Converter: LogiCORE IP Product Guide", PG269, v2.3, June 2020](https://www.xilinx.com/support/documentation/ip_documentation/usp_rf_data_converter/v2_3/pg269-rf-data-converter.pdf) # # ## Revision History # * **v1.0** | 12/02/2021 | Spectrum analyzer notebook # * **v1.1** | 15/04/2021 | Update spectral resolution and minimum bandwidth with new value # ---- # ## Introduction <a class="anchor" id="introduction"></a> # The Zynq RFSoC contains high frequency samplers known as RF Data Converters (RF DCs). The RF DCs are tightly coupled with the Programmable Logic (PL), creating a high-throughput, low-latency path between the FPGA and analogue world. The Spectrum Analyzer Module employs the RF Analogue-to-Digital Converters (RF ADCs) to receive RF time domain signals. The received data is manipulated using spectral pre-processing techniques in the PL, to prepare it for frequency domain analysis and visualisation in the Processing System (PS). # # A significant portion of the design has been implemented in the RFSoC's PL to prevent the PS from applying highly computational arithemtic. [Figure 1](#fig-1) presents a simple diagram illustrating the system overview for one spectrum analyzer channel. There is a Spectrum Analyzer Module for each available RF ADC channel in the design. The Spectrum Analyzers are also interfaced to their very own flexible decimator, allowing different sample rates to be configured for each channel. # <a class="anchor" id="fig-1"></a> # <figure> # <img src='images/spectrum_analyser_overview.png' height='50%' width='50%'/> # <figcaption><b>Figure 1: The RFSoC Spectrum Analyzer system overview.</b></figcaption> # </figure> # ### Hardware Setup <a class="anchor" id="hardware-setup"></a> # Your ZCU111 development board can host four Spectrum Analyzer Modules. To setup your board for this demonstration, you can connect each channel in loopback as shown in [Figure 2](#fig-2), or connect an antenna to one of the ADC channels. # # Don't worry if you don't have an antenna. The default loopback configuration will still be very interesting and is connected as follows: # * Channel 0: DAC4 (Tile 229 Block 0) to ADC0 (Tile 224 Block 0) # * Channel 1: DAC5 (Tile 229 Block 1) to ADC1 (Tile 224 Block 1) # * Channel 2: DAC6 (Tile 229 Block 2) to ADC2 (Tile 225 Block 0) # * Channel 3: DAC7 (Tile 229 Block 3) to ADC3 (Tile 225 Block 1) # # There has been several XM500 board revisions, and some contain different silkscreen and labels for the ADCs and DACs. 
Use the image below for further guidance and pay attention to the associated Tile and Block. # # <a class="anchor" id="fig-2"></a> # <figure> # <img src='images/zcu111_setup.png' height='50%' width='50%'/> # <figcaption><b>Figure 2: ZCU111 and XM500 development board setup in loopback mode.</b></figcaption> # </figure> # # If you have chosen to use an antenna, **do not** attach your antenna to any SMA interfaces labelled DAC. # # <div class="alert alert-box alert-danger"> # <b>Caution:</b> # In this demonstration, we generate tones using the RFSoC development board. Your device should be setup in loopback mode. You should understand that the RFSoC platform can also transmit RF signals wirelessly. Remember that unlicensed wireless transmission of RF signals may be illegal in your geographical location. Radio signals may also interfere with nearby devices, such as pacemakers and emergency radio equipment. Note that it is also illegal to intercept and decode particular RF signals. If you are unsure, please seek professional support. # </div> # ### Software Setup <a class="anchor" id="software-setup"></a> # We're nearly finished setting up the demonstration system. The majority of the libraries used by the spectrum analyzer design are contained inside the RFSoC-SAM software package. We only need to run a few code cells to initialise the software environment. # # The primary module for loading the Spectrum Analyzer design is contained inside `rfsoc_sam.overlay`. The class we are interested in using is `Overlay()`. During initialisation the class downloads the Spectrum Analyzer bitstream to the PL and configures the RF DCs and FPGA IP cores contained in our system. This process may take around a minute to complete. # # **Run** the code cell below to load the RFSoC-SAM Overlay class. # + from rfsoc_sam.overlay import Overlay sam = Overlay() # - # When the RFSoC-SAM Overlay class is initialising, the setup script will also program the LMK and LMX low-jitter clock chips on the ZCU111 to 122.8MHz and 409.6MHz respectively. # # Lets now initialise the analyzer, and setup user control. The initialisation process takes around 2 minutes. analyzer = sam.spectrum_analyzer() # ---- # ## Simple Tone Generation <a class="anchor" id="simple-tone-generation"></a> # A simple amplitude controller is required to generate tones using the RF Digital-to-Analogue Converters (RF DACs). We use tone generation in this demonstration to provide a signal for the user to inspect when using the Spectrum Analyzer Module. # # Run the code cell below to reveal a widget, which can be used to control the transmission frequency and amplitude. analyzer.children[2] # ## The Spectrum Analyzer <a class="anchor" id="the-spectrum-analyzer"></a> # We will now explore the hardware accelerated Spectrum Analyzer Module. It is worthwhile noting the analyzers capabilities below: # # * The analyzer is capable of inspecting 1638.4MHz of bandwidth. # * It can achieve a maximum spectral resolution of 0.244140625kHz. # * The bandwidth is adjustable between 1638.4MHz and 1.6MHz. # * The range of inspection is between 0 to 4096MHz using higher order Nyquist techniques. analyzer.children[1] # The Spectrum Analyzer Module contains a hardware accelerated FFT core, which can convert the RF sampled signal to the frequency domain using a range of different FFT lengths, $N = 64$ upto $N = 8192$. 
The frequency domain signal is further manipulated using a custom floating point processor to obtain the representative Power Spectral Density (PSD) or Power Spectrum. Furthermore, a hardware accelerated decibel (dB) converter is also used to condition the frequency domain signal for visual analysis. # # Through the loopback connection, you should be able to use the Spectrum Analyzer Module to locate the tone you previously generated using the tone generator. If you have an antenna connected to your board, try and locate signals of interest using the Spectrum Analyzer's control widgets. # ### A Simple Example <a class="anchor" id="a-simple-example"></a> # If you would like to enable stimulus for the spectrum analyzer, you can use your mobile phone to create WiFi traffic. Follow the steps below to create an interesting WiFi spectrum to visualise. # * Connect your mobile phone to an access point that uses WiFi. # * Then configure the spectrum analyzer for a centre frequency of 2400MHz and a decimation factor of 16. # * Switch on the spectrum analyzer and spectrogram. # * Use your phone to stream a video, or music. This will create WiFi traffic for inspection. # * Place your phone close to the RF ADC ports of the spectrum analyzer. # # You should see a similar output as given in the [Figure 3](#fig-3) below. # # <a class="anchor" id="fig-3"></a> # <figure> # <img src='images/wifi_example.jpg' height='50%' width='50%'/> # <figcaption><b>Figure 3: Capturing a WiFi signal using the Spectrum Analyser Module.</b></figcaption> # </figure> # ## Conclusion <a class="anchor" id="conclusion"></a> # This notebook has presented a hardware accelerated Spectrum Analyzer Module for the ZCU111 development board.
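# As a point of reference for what the programmable logic accelerates, the window -> FFT -> PSD -> dB chain can be sketched in NumPy on a synthetic tone. This is purely illustrative software, not the hardware pipeline; the sample rate, FFT length and tone frequency below are arbitrary choices.

import numpy as np

fs = 2048e6                                    # assumed sample rate, Hz
n = 4096                                       # FFT length
t = np.arange(n) / fs
tone = np.cos(2 * np.pi * 500e6 * t)           # synthetic 500 MHz tone
window = np.blackman(n)

spectrum = np.fft.rfft(tone * window)
psd = np.abs(spectrum) ** 2 / (fs * np.sum(window ** 2))   # periodogram-style PSD estimate
psd_db = 10 * np.log10(psd + 1e-20)                        # dB conversion, guarded against log(0)
freqs = np.fft.rfftfreq(n, d=1 / fs)
print(f"peak bin at {freqs[np.argmax(psd_db)] / 1e6:.1f} MHz")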
boards/ZCU111/rfsoc_sam/notebooks/rfsoc_spectrum_analysis.ipynb
// --- // jupyter: // jupytext: // text_representation: // extension: .java // format_name: light // format_version: '1.5' // jupytext_version: 1.14.4 // kernelspec: // display_name: Java // language: java // name: java // --- // # Create your first deep learning neural network // // ## Introduction // // This is the first of our [beginner tutorial series](https://github.com/awslabs/djl/tree/master/jupyter/tutorial) that will take you through creating, training, and running inference on a neural network. In this tutorial, you will learn how to use the built-in `Block` to create your first neural network - a Multilayer Perceptron. // // ## Neural Network // // A neural network is a black box function. Instead of coding this function yourself, you provide many sample input/output pairs for this function. Then, we try to train the network to learn how to match the behavior of the function given only these input/output pairs. A better model with more data can more accurately match the function. // // ## Multilayer Perceptron // // A Multilayer Perceptron (MLP) is one of the simplest deep learning networks. The MLP has an input layer which contains your input data, an output layer which is produced by the network and contains the data the network is supposed to be learning, and some number of hidden layers. The example below contains an input of size 3, a single hidden layer of size 3, and an output of size 2. The number and sizes of the hidden layers are determined through experimentation but more layers enable the network to represent more complicated functions. Between each pair of layers is a linear operation (sometimes called a FullyConnected operation because each number in the input connected to each number in the output by a matrix multiplication). Not pictured, there is also a non-linear activation function after each linear operation. For more information, see [Multilayer Perceptron](https://en.wikipedia.org/wiki/Multilayer_perceptron). // // ![MLP Image](https://upload.wikimedia.org/wikipedia/commons/c/c2/MultiLayerNeuralNetworkBigger_english.png) // // // ## Step 1: Setup development environment // // ### Installation // // This tutorial requires the installation of the Java Jupyter Kernel. To install the kernel, see the [Jupyter README](https://github.com/awslabs/djl/blob/master/jupyter/README.md). // + // Add the snapshot repository to get the DJL snapshot artifacts // // %mavenRepo snapshots https://oss.sonatype.org/content/repositories/snapshots/ // Add the maven dependencies // %maven ai.djl:api:0.4.0 // %maven org.slf4j:slf4j-api:1.7.26 // %maven org.slf4j:slf4j-simple:1.7.26 // See https://github.com/awslabs/djl/blob/master/mxnet/mxnet-engine/README.md // for more MXNet library selection options // %maven ai.djl.mxnet:mxnet-native-auto:1.6.0 // - import ai.djl.*; import ai.djl.nn.*; import ai.djl.nn.core.*; import ai.djl.training.*; // ## Step 2: Determine your input and output size // // The MLP model uses a one dimensional vector as the input and the output. You should determine the appropriate size of this vector based on your input data and what you will use the output of the model for. In a later tutorial, we will use this model for Mnist image classification. // // Our input vector will have size `28x28` because the input images have a height and width of 28 and it takes only a single number to represent each pixel. For a color image, you would need to further multiply this by `3` for the RGB channels. 
// // Our output vector has size `10` because there are `10` possible classes for each image. long inputSize = 28*28; long outputSize = 10; // ## Step 3: Create a **SequentialBlock** // // ### NDArray // // The core data type used for working with Deep Learning is the [NDArray](https://javadoc.djl.ai/api/0.4.0/index.html?ai/djl/ndarray/NDArray.html). An NDArray represents a multidimensional, fixed-size homogeneous array. It has very similar behavior to the Numpy python package with the addition of efficient computing. We also have a helper class, the [NDList](https://javadoc.djl.ai/api/0.4.0/index.html?ai/djl/ndarray/NDList.html) which is a list of NDArrays which can have different sizes and data types. // // ### Block API // // In DJL, [Blocks](https://javadoc.djl.ai/api/0.4.0/index.html?ai/djl/nn/Block.html) serve a purpose similar to functions that convert an input `NDList` to an output `NDList`. They can represent single operations, parts of a neural network, and even the whole neural network. What makes blocks special is that they contain a number of parameters that are used in their function and are trained during deep learning. As these parameters are trained, the function represented by the blocks get more and more accurate. // // When building these block functions, the easiest way is to use composition. Similar to how functions are built by calling other functions, blocks can be built by combining other blocks. We refer to the containing block as the parent and the sub-blocks as the children. // // // We provide several helpers to make it easy to build common block composition structures. For the MLP we will use the [SequentialBlock](https://javadoc.djl.ai/api/0.4.0/index.html?ai/djl/nn/SequentialBlock.html), a container block whose children form a chain of blocks where each child block feeds its output to the next child block in a sequence. // SequentialBlock block = new SequentialBlock(); // ## Step 4: Add blocks to SequentialBlock // // An MLP is organized into several layers. Each layer is composed of a [Linear Block](https://javadoc.djl.ai/api/0.4.0/index.html?ai/djl/nn/core/Linear.html) and a non-linear activation function. If we just had two linear blocks in a row, it would be the same as a combined linear block ($f(x) = W_2(W_1x) = (W_2W_1)x = W_{combined}x$). An activation is used to intersperse between the linear blocks to allow them to represent non-linear functions. We will use the popular [ReLU](https://javadoc.djl.ai/api/0.4.0/ai/djl/nn/Activation.html#reluBlock--) as our activation function. // // The first layer and last layers have fixed sizes depending on your desired input and output size. However, you are free to choose the number and sizes of the middle layers in the network. We will create a smaller MLP with two middle layers that gradually decrease the size. Typically, you would experiment with different values to see what works the best on your data set. // + block.add(Blocks.batchFlattenBlock(inputSize)); block.add(Linear.builder().setOutChannels(128).build()); block.add(Activation::relu); block.add(Linear.builder().setOutChannels(64).build()); block.add(Activation::relu); block.add(Linear.builder().setOutChannels(outputSize).build()); block // - // ## Summary // // Now that you've successfully created your first neural network, you can use this network to train your model. 
// // Next chapter: [Train your first model](train_your_first_model.ipynb) // // You can find the complete source code for this tutorial in the [model zoo](https://github.com/awslabs/djl/blob/master/model-zoo/src/main/java/ai/djl/basicmodelzoo/basic/Mlp.java).
jupyter/tutorial/create_your_first_network.ipynb
# --- # jupyter: # jupytext: # split_at_heading: true # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- #export from fastai2.basics import * from fastai2.vision.all import * # + #default_exp vision.gan #default_cls_lvl 3 # - #hide from nbdev.showdoc import * # # GAN # # > Basic support for [Generative Adversial Networks](https://arxiv.org/abs/1406.2661) # GAN stands for [Generative Adversarial Nets](https://arxiv.org/pdf/1406.2661.pdf) and were invented by <NAME>. The concept is that we train two models at the same time: a generator and a critic. The generator will try to make new images similar to the ones in a dataset, and the critic will try to classify real images from the ones the generator does. The generator returns images, the critic a single number (usually a probability, 0. for fake images and 1. for real ones). # # We train them against each other in the sense that at each step (more or less), we: # 1. Freeze the generator and train the critic for one step by: # - getting one batch of true images (let's call that `real`) # - generating one batch of fake images (let's call that `fake`) # - have the critic evaluate each batch and compute a loss function from that; the important part is that it rewards positively the detection of real images and penalizes the fake ones # - update the weights of the critic with the gradients of this loss # # # 2. Freeze the critic and train the generator for one step by: # - generating one batch of fake images # - evaluate the critic on it # - return a loss that rewards posisitively the critic thinking those are real images # - update the weights of the generator with the gradients of this loss # > Note: The fastai library provides support for training GANs through the GANTrainer, but doesn't include more than basic models. # ## Wrapping the modules #export class GANModule(Module): "Wrapper around a `generator` and a `critic` to create a GAN." def __init__(self, generator=None, critic=None, gen_mode=False): if generator is not None: self.generator=generator if critic is not None: self.critic =critic store_attr(self, 'gen_mode') def forward(self, *args): return self.generator(*args) if self.gen_mode else self.critic(*args) def switch(self, gen_mode=None): "Put the module in generator mode if `gen_mode`, in critic mode otherwise." self.gen_mode = (not self.gen_mode) if gen_mode is None else gen_mode # This is just a shell to contain the two models. When called, it will either delegate the input to the `generator` or the `critic` depending of the value of `gen_mode`. show_doc(GANModule.switch) # By default (leaving `gen_mode` to `None`), this will put the module in the other mode (critic mode if it was in generator mode and vice versa). #export @delegates(ConvLayer.__init__) def basic_critic(in_size, n_channels, n_features=64, n_extra_layers=0, norm_type=NormType.Batch, **kwargs): "A basic critic for images `n_channels` x `in_size` x `in_size`." 
layers = [ConvLayer(n_channels, n_features, 4, 2, 1, norm_type=None, **kwargs)] cur_size, cur_ftrs = in_size//2, n_features layers += [ConvLayer(cur_ftrs, cur_ftrs, 3, 1, norm_type=norm_type, **kwargs) for _ in range(n_extra_layers)] while cur_size > 4: layers.append(ConvLayer(cur_ftrs, cur_ftrs*2, 4, 2, 1, norm_type=norm_type, **kwargs)) cur_ftrs *= 2 ; cur_size //= 2 init = kwargs.get('init', nn.init.kaiming_normal_) layers += [init_default(nn.Conv2d(cur_ftrs, 1, 4, padding=0), init), Flatten()] return nn.Sequential(*layers) #export class AddChannels(Module): "Add `n_dim` channels at the end of the input." def __init__(self, n_dim): self.n_dim=n_dim def forward(self, x): return x.view(*(list(x.shape)+[1]*self.n_dim)) #export @delegates(ConvLayer.__init__) def basic_generator(out_size, n_channels, in_sz=100, n_features=64, n_extra_layers=0, **kwargs): "A basic generator from `in_sz` to images `n_channels` x `out_size` x `out_size`." cur_size, cur_ftrs = 4, n_features//2 while cur_size < out_size: cur_size *= 2; cur_ftrs *= 2 layers = [AddChannels(2), ConvLayer(in_sz, cur_ftrs, 4, 1, transpose=True, **kwargs)] cur_size = 4 while cur_size < out_size // 2: layers.append(ConvLayer(cur_ftrs, cur_ftrs//2, 4, 2, 1, transpose=True, **kwargs)) cur_ftrs //= 2; cur_size *= 2 layers += [ConvLayer(cur_ftrs, cur_ftrs, 3, 1, 1, transpose=True, **kwargs) for _ in range(n_extra_layers)] layers += [nn.ConvTranspose2d(cur_ftrs, n_channels, 4, 2, 1, bias=False), nn.Tanh()] return nn.Sequential(*layers) # + critic = basic_critic(64, 3) generator = basic_generator(64, 3) tst = GANModule(critic=critic, generator=generator) real = torch.randn(2, 3, 64, 64) real_p = tst(real) test_eq(real_p.shape, [2,1]) tst.switch() #tst is now in generator mode noise = torch.randn(2, 100) fake = tst(noise) test_eq(fake.shape, real.shape) tst.switch() #tst is back in critic mode fake_p = tst(fake) test_eq(fake_p.shape, [2,1]) # + #export _conv_args = dict(act_cls = partial(nn.LeakyReLU, negative_slope=0.2), norm_type=NormType.Spectral) def _conv(ni, nf, ks=3, stride=1, self_attention=False, **kwargs): if self_attention: kwargs['xtra'] = SelfAttention(nf) return ConvLayer(ni, nf, ks=ks, stride=stride, **_conv_args, **kwargs) # - #export @delegates(ConvLayer) def DenseResBlock(nf, norm_type=NormType.Batch, **kwargs): "Resnet block of `nf` features. `conv_kwargs` are passed to `conv_layer`." return SequentialEx(ConvLayer(nf, nf, norm_type=norm_type, **kwargs), ConvLayer(nf, nf, norm_type=norm_type, **kwargs), MergeLayer(dense=True)) #export def gan_critic(n_channels=3, nf=128, n_blocks=3, p=0.15): "Critic to train a `GAN`." 
layers = [ _conv(n_channels, nf, ks=4, stride=2), nn.Dropout2d(p/2), DenseResBlock(nf, **_conv_args)] nf *= 2 # after dense block for i in range(n_blocks): layers += [ nn.Dropout2d(p), _conv(nf, nf*2, ks=4, stride=2, self_attention=(i==0))] nf *= 2 layers += [ ConvLayer(nf, 1, ks=4, bias=False, padding=0, norm_type=NormType.Spectral, act_cls=None), Flatten()] return nn.Sequential(*layers) #export class GANLoss(GANModule): "Wrapper around `crit_loss_func` and `gen_loss_func`" def __init__(self, gen_loss_func, crit_loss_func, gan_model): super().__init__() store_attr(self, 'gen_loss_func,crit_loss_func,gan_model') def generator(self, output, target): "Evaluate the `output` with the critic then uses `self.gen_loss_func`" fake_pred = self.gan_model.critic(output) self.gen_loss = self.gen_loss_func(fake_pred, output, target) return self.gen_loss def critic(self, real_pred, input): "Create some `fake_pred` with the generator from `input` and compare them to `real_pred` in `self.crit_loss_func`." fake = self.gan_model.generator(input.requires_grad_(False)).requires_grad_(True) fake_pred = self.gan_model.critic(fake) self.crit_loss = self.crit_loss_func(real_pred, fake_pred) return self.crit_loss # In generator mode, this loss function expects the `output` of the generator and some `target` (a batch of real images). It will evaluate if the generator successfully fooled the critic using `gen_loss_func`. This loss function has the following signature # ``` # def gen_loss_func(fake_pred, output, target): # ``` # to be able to combine the output of the critic on `output` (which the first argument `fake_pred`) with `output` and `target` (if you want to mix the GAN loss with other losses for instance). # # In critic mode, this loss function expects the `real_pred` given by the critic and some `input` (the noise fed to the generator). It will evaluate the critic using `crit_loss_func`. This loss function has the following signature # ``` # def crit_loss_func(real_pred, fake_pred): # ``` # where `real_pred` is the output of the critic on a batch of real images and `fake_pred` is generated from the noise using the generator. #export class AdaptiveLoss(Module): "Expand the `target` to match the `output` size before applying `crit`." def __init__(self, crit): self.crit = crit def forward(self, output, target): return self.crit(output, target[:,None].expand_as(output).float()) #export def accuracy_thresh_expand(y_pred, y_true, thresh=0.5, sigmoid=True): "Compute accuracy after expanding `y_true` to the size of `y_pred`." if sigmoid: y_pred = y_pred.sigmoid() return ((y_pred>thresh).byte()==y_true[:,None].expand_as(y_pred).byte()).float().mean() # ## Callbacks for GAN training #export def set_freeze_model(m, rg): for p in m.parameters(): p.requires_grad_(rg) #export class GANTrainer(Callback): "Handles GAN Training." run_after = TrainEvalCallback def __init__(self, switch_eval=False, clip=None, beta=0.98, gen_first=False, show_img=True): store_attr(self, 'switch_eval,clip,gen_first,show_img') self.gen_loss,self.crit_loss = AvgSmoothLoss(beta=beta),AvgSmoothLoss(beta=beta) def _set_trainable(self): train_model = self.generator if self.gen_mode else self.critic loss_model = self.generator if not self.gen_mode else self.critic set_freeze_model(train_model, True) set_freeze_model(loss_model, False) if self.switch_eval: train_model.train() loss_model.eval() def begin_fit(self): "Initialize smootheners." 
self.generator,self.critic = self.model.generator,self.model.critic self.gen_mode = self.gen_first self.switch(self.gen_mode) self.crit_losses,self.gen_losses = [],[] self.gen_loss.reset() ; self.crit_loss.reset() #self.recorder.no_val=True #self.recorder.add_metric_names(['gen_loss', 'disc_loss']) #self.imgs,self.titles = [],[] def begin_validate(self): "Switch in generator mode for showing results." self.switch(gen_mode=True) def begin_batch(self): "Clamp the weights with `self.clip` if it's not None, set the correct input/target." if self.training and self.clip is not None: for p in self.critic.parameters(): p.data.clamp_(-self.clip, self.clip) if not self.gen_mode: (self.learn.xb,self.learn.yb) = (self.yb,self.xb) def after_batch(self): "Record `last_loss` in the proper list." if not self.training: return if self.gen_mode: self.gen_loss.accumulate(self.learn) self.gen_losses.append(self.gen_loss.value) self.last_gen = to_detach(self.pred) else: self.crit_loss.accumulate(self.learn) self.crit_losses.append(self.crit_loss.value) def begin_epoch(self): "Put the critic or the generator back to eval if necessary." self.switch(self.gen_mode) #def after_epoch(self): # "Show a sample image." # if not hasattr(self, 'last_gen') or not self.show_img: return # data = self.learn.data # img = self.last_gen[0] # norm = getattr(data,'norm',False) # if norm and norm.keywords.get('do_y',False): img = data.denorm(img) # img = data.train_ds.y.reconstruct(img) # self.imgs.append(img) # self.titles.append(f'Epoch {epoch}') # pbar.show_imgs(self.imgs, self.titles) # return add_metrics(last_metrics, [getattr(self.smoothenerG,'smooth',None),getattr(self.smoothenerC,'smooth',None)]) def switch(self, gen_mode=None): "Switch the model and loss function, if `gen_mode` is provided, in the desired mode." self.gen_mode = (not self.gen_mode) if gen_mode is None else gen_mode self._set_trainable() self.model.switch(gen_mode) self.loss_func.switch(gen_mode) # > Warning: The GANTrainer is useless on its own, you need to complete it with one of the following switchers #export class FixedGANSwitcher(Callback): "Switcher to do `n_crit` iterations of the critic then `n_gen` iterations of the generator." run_after = GANTrainer def __init__(self, n_crit=1, n_gen=1): store_attr(self, 'n_crit,n_gen') def begin_train(self): self.n_c,self.n_g = 0,0 def after_batch(self): "Switch the model if necessary." if not self.training: return if self.learn.gan_trainer.gen_mode: self.n_g += 1 n_iter,n_in,n_out = self.n_gen,self.n_c,self.n_g else: self.n_c += 1 n_iter,n_in,n_out = self.n_crit,self.n_g,self.n_c target = n_iter if isinstance(n_iter, int) else n_iter(n_in) if target == n_out: self.learn.gan_trainer.switch() self.n_c,self.n_g = 0,0 #export class AdaptiveGANSwitcher(Callback): "Switcher that goes back to generator/critic when the loss goes below `gen_thresh`/`crit_thresh`." run_after = GANTrainer def __init__(self, gen_thresh=None, critic_thresh=None): store_attr(self, 'gen_thresh,critic_thresh') def after_batch(self): "Switch the model if necessary." if not self.training: return if self.gan_trainer.gen_mode: if self.gen_thresh is None or self.loss < self.gen_thresh: self.gan_trainer.switch() else: if self.critic_thresh is None or self.loss < self.critic_thresh: self.gan_trainer.switch() #export class GANDiscriminativeLR(Callback): "`Callback` that handles multiplying the learning rate by `mult_lr` for the critic." 
run_after = GANTrainer def __init__(self, mult_lr=5.): self.mult_lr = mult_lr def begin_batch(self): "Multiply the current lr if necessary." if not self.learn.gan_trainer.gen_mode and self.training: self.learn.opt.set_hyper('lr', self.learn.opt.hypers[0]['lr']*self.mult_lr) def after_batch(self): "Put the LR back to its value if necessary." if not self.learn.gan_trainer.gen_mode: self.learn.opt.set_hyper('lr', self.learn.opt.hypers[0]['lr']/self.mult_lr) # ## GAN data #export class InvisibleTensor(TensorBase): def show(self, ctx=None, **kwargs): return ctx #export def generate_noise(fn, size=100): return cast(torch.randn(size), InvisibleTensor) #export @typedispatch def show_batch(x:InvisibleTensor, y:TensorImage, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs): if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize) ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs) return ctxs #export @typedispatch def show_results(x:InvisibleTensor, y:TensorImage, samples, outs, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs): if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize) ctxs = [b.show(ctx=c, **kwargs) for b,c,_ in zip(outs.itemgot(0),ctxs,range(max_n))] return ctxs bs = 128 size = 64 dblock = DataBlock(blocks = (TransformBlock, ImageBlock), get_x = generate_noise, get_items = get_image_files, splitter = IndexSplitter([]), item_tfms=Resize(size, method=ResizeMethod.Crop), batch_tfms = Normalize.from_stats(torch.tensor([0.5,0.5,0.5]), torch.tensor([0.5,0.5,0.5]))) path = untar_data(URLs.LSUN_BEDROOMS) dls = dblock.dataloaders(path, path=path, bs=bs) dls.show_batch(max_n=16) # ## GAN Learner #export def gan_loss_from_func(loss_gen, loss_crit, weights_gen=None): "Define loss functions for a GAN from `loss_gen` and `loss_crit`." def _loss_G(fake_pred, output, target, weights_gen=weights_gen): ones = fake_pred.new_ones(fake_pred.shape[0]) weights_gen = ifnone(weights_gen, (1.,1.)) return weights_gen[0] * loss_crit(fake_pred, ones) + weights_gen[1] * loss_gen(output, target) def _loss_C(real_pred, fake_pred): ones = real_pred.new_ones (real_pred.shape[0]) zeros = fake_pred.new_zeros(fake_pred.shape[0]) return (loss_crit(real_pred, ones) + loss_crit(fake_pred, zeros)) / 2 return _loss_G, _loss_C #export def _tk_mean(fake_pred, output, target): return fake_pred.mean() def _tk_diff(real_pred, fake_pred): return real_pred.mean() - fake_pred.mean() # + #export @delegates() class GANLearner(Learner): "A `Learner` suitable for GANs." def __init__(self, dls, generator, critic, gen_loss_func, crit_loss_func, switcher=None, gen_first=False, switch_eval=True, show_img=True, clip=None, cbs=None, metrics=None, **kwargs): gan = GANModule(generator, critic) loss_func = GANLoss(gen_loss_func, crit_loss_func, gan) if switcher is None: switcher = FixedGANSwitcher(n_crit=5, n_gen=1) trainer = GANTrainer(clip=clip, switch_eval=switch_eval, show_img=show_img) cbs = L(cbs) + L(trainer, switcher) metrics = L(metrics) + L(*LossMetrics('gen_loss,crit_loss')) super().__init__(dls, gan, loss_func=loss_func, cbs=cbs, metrics=metrics, **kwargs) @classmethod def from_learners(cls, gen_learn, crit_learn, switcher=None, weights_gen=None, **kwargs): "Create a GAN from `learn_gen` and `learn_crit`." 
losses = gan_loss_from_func(gen_learn.loss_func, crit_learn.loss_func, weights_gen=weights_gen) return cls(gen_learn.dls, gen_learn.model, crit_learn.model, *losses, switcher=switcher, **kwargs) @classmethod def wgan(cls, dls, generator, critic, switcher=None, clip=0.01, switch_eval=False, **kwargs): "Create a WGAN from `data`, `generator` and `critic`." return cls(dls, generator, critic, _tk_mean, _tk_diff, switcher=switcher, clip=clip, switch_eval=switch_eval, **kwargs) GANLearner.from_learners = delegates(to=GANLearner.__init__)(GANLearner.from_learners) GANLearner.wgan = delegates(to=GANLearner.__init__)(GANLearner.wgan) # - from fastai2.callback.all import * generator = basic_generator(64, n_channels=3, n_extra_layers=1) critic = basic_critic (64, n_channels=3, n_extra_layers=1, act_cls=partial(nn.LeakyReLU, negative_slope=0.2)) learn = GANLearner.wgan(dls, generator, critic, opt_func = RMSProp) learn.recorder.train_metrics=True learn.recorder.valid_metrics=False #slow learn.fit(1, 2e-4, wd=0.) learn.show_results(max_n=9, ds_idx=0) # ## Export - #hide from nbdev.export import notebook2script notebook2script()
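# For reference, the alternating schedule that `FixedGANSwitcher(n_crit=5, n_gen=1)` and the WGAN-style losses orchestrate can be sketched in plain PyTorch. This is a toy stand-in with made-up linear networks and random data, not fastai's implementation.

import torch
from torch import nn

generator = nn.Sequential(nn.Linear(100, 64), nn.ReLU(), nn.Linear(64, 2))        # toy generator
critic = nn.Sequential(nn.Linear(2, 64), nn.LeakyReLU(0.2), nn.Linear(64, 1))     # toy critic
opt_g = torch.optim.RMSprop(generator.parameters(), lr=2e-4)
opt_c = torch.optim.RMSprop(critic.parameters(), lr=2e-4)

real = torch.randn(128, 2)                     # pretend "real" samples

for _ in range(5):                             # 5 critic steps per generator step
    noise = torch.randn(128, 100)
    fake = generator(noise).detach()           # generator frozen during the critic update
    loss_c = critic(fake).mean() - critic(real).mean()   # WGAN-style critic loss
    opt_c.zero_grad(); loss_c.backward(); opt_c.step()
    for p in critic.parameters():              # weight clipping, the analogue of clip=0.01
        p.data.clamp_(-0.01, 0.01)

noise = torch.randn(128, 100)                  # one generator step
loss_g = -critic(generator(noise)).mean()      # generator is rewarded for fooling the critic
opt_g.zero_grad(); loss_g.backward(); opt_g.step()
print(float(loss_c), float(loss_g))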
nbs/24_vision.gan.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # This notebook does the following: # 1. Load raw reddit submission data from dataframes scraped by `scripts/get_reddit_gamethreads.py`. # 2. Add a couple of useful columns: `date` for the date that the game was played, and `is_postgame` indicating # whether the post is an in-game thread or post-game thread # 3. Parse the title to extract the names of the two teams playing, in the form of team_ids as given in `data/teams.csv`. # 4. Add a `gid` column, of the form `YYYY-mm-dd_team1ID_team2ID`, where `(team1ID,team2ID)` are the team_ids of the # two teams sorted in alphabetical order. This uniquely identifies each game. # + import glob import datetime as dt import re import json import numpy as np import pandas as pd import matplotlib.pyplot as plt # %matplotlib inline import watchcbb.reddit_utils as ru # - # ### Load gamethread dataframes from gzipped pickled files dfs = [] for fname in glob.glob('../data/gamethreads/*/*.pkl.gz'): with open(fname, 'rb') as fid: df = pd.read_pickle(fid, compression='gzip') dfs.append(df) df = pd.concat(dfs) df = df[df.title.apply(lambda x: '[game' in x.lower() or '[post' in x.lower())].reset_index() print(df.shape) df.head() # ### Add `timestamp`, `date`, and `is_postgame` columns df["timestamp"] = df.created.apply(dt.datetime.fromtimestamp) # UNIX epoch -> datetime (in UTC) df["date"] = df.created.apply(ru.date_from_timestamp) # Convert to date of game df["is_postgame"] = df.title.apply(lambda x:"[post " in x.lower()) # in-game or post-game thread? df = df.sort_values("timestamp").reset_index() df[["timestamp","date","id","title","is_postgame","ups","num_comments"]].head(30) # ### Parse the titles into 2-tuples of team names print((df.title.apply(ru.parse_title).isna()).sum()) df.title.apply(ru.parse_title).head(20) # ### Convert team names generated in above cell to team_ids # * These should match those stored in `data/teams.csv`. # * Replacements are made to fix some common errors # * A value of `None` is stored when we can't guess the corred ID # * The list outputted below shows the most common names that weren't able to be parsed # + import importlib ru = importlib.reload(ru) ru._BAD = [] df["team_ids"] = df.title.apply(ru.parse_title).apply(ru.fix_names) print(df.team_ids.isna().sum()) pd.Series(ru._BAD).value_counts().head(20) # - # ### Now generate a unique `gid` for each game # Format is `YYYY-mm-dd_teamID1_teamID2`, where `(teamID1,teamID2)` are sorted in alphabetical order # + def get_gid(row): if row.team_ids is None: return None ids = sorted(row.team_ids) return f'{row.date}_{ids[0]}_{ids[1]}' df["gid"] = df.apply(get_gid, axis=1) tosave = df[["date","timestamp","id","gid","is_postgame","ups","num_comments","title"]].copy() tosave.head(10) # - # ### Some `gid`s are duplicated due to multiple/deleted posts. These are the ones with highest multiplicity. 
tosave.gid.value_counts().head(10) # ### We want to save only the one example of game/post-game thread with highest comment count # * First sort by `gid` and `is_postgame` to group into groups that we want to reduce # * Sort by `num_comments` and `ups`, and we will then keep only the last example of each group tosave = tosave.sort_values(['gid','is_postgame','num_comments','ups']) tosave[tosave.gid=='2019-11-10_florida_florida-state'] # ### Now drop the unwanted duplicates (as well as `None` gid values) print("Before drop duplicates", tosave.shape) tosave = tosave.drop_duplicates(['gid','is_postgame'], keep='last') print("Before dropna", tosave.shape) tosave.dropna(axis=0, subset=['gid'],inplace=True) print("After dropna", tosave.shape) tosave[tosave.gid=='2019-11-10_florida_florida-state'] # ### Check that everything looks good, and save the cleaned dataframe to a pickled file tosave[tosave.gid.apply(lambda x:"_purdue" in x)].head(20) with open('../data/gamethreads/aggregated_cleaned_2017-2020.pkl.gz', 'wb') as fid: tosave.to_pickle(fid, compression='gzip') # ### Now make a couple of plots of #comments to make sure everything looks OK # + fig, axs = plt.subplots(1,2,figsize=(15,6)) axs[0].hist(np.log10(tosave.loc[df.is_postgame].num_comments.values+1), bins=np.linspace(0,4,41), histtype='step', lw=2, label="Post-Game Thread") axs[0].hist(np.log10(tosave.loc[~df.is_postgame].num_comments.values+1), bins=np.linspace(0,4,41), histtype='step', lw=2, label="In-Game Thread") axs[0].hist(0.844*np.log10(tosave.loc[~df.is_postgame].num_comments.values+1), bins=np.linspace(0,4,41), histtype='step', lw=2, label="0.84*In-Game Thread") axs[0].legend(fontsize='x-large') axs[0].set_xlabel("log10(#comments + 1)", fontsize='large') xs = [] ys = [] for gid in tosave.gid.unique(): mask1 = (tosave.gid==gid) & ~tosave.is_postgame mask2 = (tosave.gid==gid) & tosave.is_postgame if mask1.sum()!=1 or mask2.sum()!=1: continue xs.append(tosave.num_comments[mask1].values[0]) ys.append(tosave.num_comments[mask2].values[0]) xs = np.log10(np.array(xs)+1) ys = np.log10(np.array(ys)+1) axs[1].scatter(xs, ys, s=20, alpha=0.25) # p = np.polyfit(xs, ys, deg=1) a = np.sum(xs*ys)/np.sum(xs*xs) p = [a,0] print(p) axs[1].plot([0,4],p[0]*np.array([0,4])+p[1],'r-') axs[1].set_xlabel('In-game') axs[1].set_ylabel('Post-game') # -
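# The sort + `drop_duplicates(keep='last')` pattern used above is easy to sanity-check on a toy frame; the rows below are invented but use the same columns.

import pandas as pd

# Three threads for one game id: two in-game, one post-game
toy = pd.DataFrame({
    'gid': ['2019-11-10_florida_florida-state'] * 3,
    'is_postgame': [False, False, True],
    'num_comments': [12, 450, 300],
    'ups': [5, 80, 60],
})

# Sort so the highest-comment thread ends up last within each (gid, is_postgame) group,
# then keep only that last row per group
toy = toy.sort_values(['gid', 'is_postgame', 'num_comments', 'ups'])
print(toy.drop_duplicates(['gid', 'is_postgame'], keep='last'))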
notebooks/gamethread_title_parse.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # # Kernel Density Estimate of Species Distributions # This shows an example of a neighbors-based query (in particular a kernel # density estimate) on geospatial data, using a Ball Tree built upon the # Haversine distance metric -- i.e. distances over points in latitude/longitude. # The dataset is provided by Phillips et. al. (2006). # If available, the example uses # `basemap <https://matplotlib.org/basemap/>`_ # to plot the coast lines and national boundaries of South America. # # This example does not perform any learning over the data # (see `sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py` for # an example of classification based on the attributes in this dataset). It # simply shows the kernel density estimate of observed data points in # geospatial coordinates. # # The two species are: # # - `"Bradypus variegatus" # <http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ , # the Brown-throated Sloth. # # - `"Microryzomys minutus" # <http://www.iucnredlist.org/details/13408/0>`_ , # also known as the Forest Small Rice Rat, a rodent that lives in Peru, # Colombia, Ecuador, Peru, and Venezuela. # # ## References # # * `"Maximum entropy modeling of species geographic distributions" # <http://rob.schapire.net/papers/ecolmod.pdf>`_ # <NAME>, <NAME>, <NAME> - Ecological Modelling, # 190:231-259, 2006. # # + # Author: <NAME> <<EMAIL>> # # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import fetch_species_distributions from sklearn.neighbors import KernelDensity # if basemap is available, we'll use it. # otherwise, we'll improvise later... try: from mpl_toolkits.basemap import Basemap basemap = True except ImportError: basemap = False def construct_grids(batch): """Construct the map grid from the batch object Parameters ---------- batch : Batch object The object returned by :func:`fetch_species_distributions` Returns ------- (xgrid, ygrid) : 1-D arrays The grid corresponding to the values in batch.coverages """ # x,y coordinates for corner cells xmin = batch.x_left_lower_corner + batch.grid_size xmax = xmin + (batch.Nx * batch.grid_size) ymin = batch.y_left_lower_corner + batch.grid_size ymax = ymin + (batch.Ny * batch.grid_size) # x coordinates of the grid cells xgrid = np.arange(xmin, xmax, batch.grid_size) # y coordinates of the grid cells ygrid = np.arange(ymin, ymax, batch.grid_size) return (xgrid, ygrid) # Get matrices/arrays of species IDs and locations data = fetch_species_distributions() species_names = ['Bradypus Variegatus', 'Microryzomys Minutus'] Xtrain = np.vstack([data['train']['dd lat'], data['train']['dd long']]).T ytrain = np.array([d.decode('ascii').startswith('micro') for d in data['train']['species']], dtype='int') Xtrain *= np.pi / 180. # Convert lat/long to radians # Set up the data grid for the contour plot xgrid, ygrid = construct_grids(data) X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1]) land_reference = data.coverages[6][::5, ::5] land_mask = (land_reference > -9999).ravel() xy = np.vstack([Y.ravel(), X.ravel()]).T xy = xy[land_mask] xy *= np.pi / 180. 
# Plot map of South America with distributions of each species fig = plt.figure() fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05) for i in range(2): plt.subplot(1, 2, i + 1) # construct a kernel density estimate of the distribution print(" - computing KDE in spherical coordinates") kde = KernelDensity(bandwidth=0.04, metric='haversine', kernel='gaussian', algorithm='ball_tree') kde.fit(Xtrain[ytrain == i]) # evaluate only on the land: -9999 indicates ocean Z = np.full(land_mask.shape[0], -9999, dtype='int') Z[land_mask] = np.exp(kde.score_samples(xy)) Z = Z.reshape(X.shape) # plot contours of the density levels = np.linspace(0, Z.max(), 25) plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds) if basemap: print(" - plot coastlines using basemap") m = Basemap(projection='cyl', llcrnrlat=Y.min(), urcrnrlat=Y.max(), llcrnrlon=X.min(), urcrnrlon=X.max(), resolution='c') m.drawcoastlines() m.drawcountries() else: print(" - plot coastlines from coverage") plt.contour(X, Y, land_reference, levels=[-9998], colors="k", linestyles="solid") plt.xticks([]) plt.yticks([]) plt.title(species_names[i]) plt.show()
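# The core of the estimate above, separated from the plotting and run on made-up coordinates: a Gaussian KDE on a Ball Tree with the haversine metric, which expects latitude/longitude in radians, and whose `score_samples` returns log-density.

import numpy as np
from sklearn.neighbors import KernelDensity

points_deg = np.array([[-10.0, -60.0], [-10.5, -60.5], [-9.5, -59.5]])  # (lat, long) in degrees
points_rad = np.radians(points_deg)              # haversine distances are defined on radians

kde = KernelDensity(bandwidth=0.04, metric='haversine',
                    kernel='gaussian', algorithm='ball_tree')
kde.fit(points_rad)

query = np.radians([[-10.2, -60.2]])
print(np.exp(kde.score_samples(query)))          # exponentiate the log-density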
3_ml_start_knn_examples/plot_species_kde.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="3CA0tdmlIKk4" colab_type="text" # # Flocculation Design Challenge # # Learn how to use the AguaClara code distribution and python to design a flocculator! # The [AguaClara code documentation](https://aguaclara.github.io/aguaclara/index.html) will be helpful as you search for useful functions. # # 30 points total # * 4 for style (define variables, comments in code, clear names, answers in sentences) # * 26 for questions # + id="rQ40v4xNxLRW" colab_type="code" outputId="1da61da3-9f5f-4352-fffb-d136b9ba7f07" executionInfo={"status": "ok", "timestamp": 1569503070498, "user_tz": 240, "elapsed": 7651, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDmYNDq6ij0468RSHe1goXE_t9gbSPdq5OAsU4-ejQ=s64", "userId": "08369668289863895493"}} colab={"base_uri": "https://localhost:8080/", "height": 513} # !pip install aguaclara # + id="wufnKCzBHPC5" colab_type="code" colab={} from aguaclara.core.units import unit_registry as u import aguaclara as ac import numpy as np import matplotlib.pyplot as plt # + [markdown] id="BVUZ6Hkl8xRh" colab_type="text" # # Velocity gradients and flow geometry # # # ### 1) (2 points) # # Coagulant is injected in the center a long straight pipe. The pipe is 12 inches Nominal Diameter schedule 40 PVC and the flow rate is 120 L/s at $10^{\circ}C$. What distance is required for the coagulant to be completely mixed with the water in the pipe? Note that this estimate is based on the time required for an eddy to traverse the diameter of the pipe and that a safety factor of order 3 * $\pi$/2 would be reasonable. The eddy is assumed to not travel in a straight line and thus it more likely travels a distance of $\pi$/2 an the factor of 3 is an additional safety factor.Include this safety factor in the calculations. See the [(equation for pipe mixing)](https://aguaclara.github.io/Textbook/Rapid_Mix/RM_Derivations.html?highlight=energy%20dissipation#equation-rapid-mix-rm-derivations-42). # # * 1 point for correct friction factor # * 1 point for correct distance # + id="1DAxoEF09MNd" colab_type="code" colab={} # + [markdown] id="mburwR8_939o" colab_type="text" # ### 2) (1 point) # # What is the residence time in this mixing zone? # # * 1 point for correct answer # + id="7p958wsl97dP" colab_type="code" colab={} # + [markdown] id="7o7iBw1L-G-m" colab_type="text" # ### 3) (1 point) # # How much head loss from wall shear will have occurred in the pipe in the distance measured in the previous problem? This analysis reveals how little energy is required to blend the coagulant with the raw water. # # * 1 point for correct answer # + id="3G_lIWpK-Net" colab_type="code" colab={} # + [markdown] id="sNgXh70UAbil" colab_type="text" # ### 4) (1 point) # # What is the [Camp Stein velocity gradient](https://aguaclara.github.io/Textbook/Rapid_Mix/RM_Intro.html?highlight=camp%20stein#id15) in this pipe flow? # # * 1 point for correct answer # # + id="Yvv15jbGAexg" colab_type="code" colab={} # + [markdown] id="jHaRWU6XAiOe" colab_type="text" # ### 5) (2 points) # # What is the $G\theta$ for this mixing zone and how does it compare with the $G\theta$ recommended for [mechanical mixing units](https://aguaclara.github.io/Textbook/Rapid_Mix/RM_Intro.html#maximum-velocity-gradients)? 
# # * 1 for correct Gt # * 1 point for comparison # # + id="d_lPTfo6Ap6Q" colab_type="code" colab={} # + [markdown] id="ERhG_WGwCEg4" colab_type="text" # ### 6) (1 point) # # What is the velocity gradient at the wall of the pipe? This will make it apparent that the velocity gradient is far from constant # # * 1 point for correct answer # + id="vNVDpZ6PCFba" colab_type="code" colab={} # + [markdown] id="F_V3xcPgCNeZ" colab_type="text" # ### 7) (2 points) # # Suppose we insert a [flat plate oriented with the flat surface facing the flow](https://aguaclara.github.io/Textbook/Rapid_Mix/RM_Derivations.html?highlight=flat%20plate#behind-a-flat-plate) inside the pipe. Let the width of the plate be 0.5 cm so it is small enough that it doesn't significantly increase the velocity in the pipe. What is the maximum velocity gradient downstream of the plate? You may neglect the fact that the velocity in the center of the turbulent pipe flow is slightly higher than the average velocity. # # * 1 for correct Ratio # * 1 for correct velocity gradient # + id="s_1GJKRMCQJo" colab_type="code" colab={} # + [markdown] id="Oo7xIHrSCW4Y" colab_type="text" # ### 8) (1 point) # What happens to the velocity gradient if a narrower flat plate is used? Does the maximum velocity gradient increase or decrease? Just look at the equation to answer this! # # * 1 point for correct answer # + [markdown] id="60KdrJ-YCXeg" colab_type="text" # # + [markdown] id="CwAM-0xkEEe8" colab_type="text" # # Flocculation model # # ### 1) (2 points) # How far will two kaolin clay particles (density of 2650 $\frac{kg}{m^3}$) with a diameter of 5 $\mu m$ travel relative to each if they are in a uniform velocity gradient of 100 Hz for 400 s and separated (in the direction of the velocity gradient) by their average separation distance based on a turbidity of 0.5 NTU? # # We have defined NTU as a unit based on the concentration of clay in the aguaclara code base. You can derive these simple equations yourself or find them in the text. Note that in a uniform velocity gradient $\bar G = G_{CS}$. The [floc model code documentation](https://aguaclara.github.io/aguaclara/research/floc_model.html) and the [floc model chapter](https://aguaclara.github.io/Textbook/Flocculation/Floc_Model.html) will be helpful. The relative displacement caused simply by the deformation of the fluid is impressive! # # * 1 point for correct separation distance # * 1 point for correct travel distance # + id="bccfOxr-EHlX" colab_type="code" colab={} # + [markdown] id="AyGEfBI2EOqn" colab_type="text" # ### 2) (2 points) # # How much volume is "[cleared](https://aguaclara.github.io/Textbook/Flocculation/Floc_Model.html#equation-flocculation-floc-model-8)" by these particles divided by the volume occupied by the particles? This ratio is essentially how many times these particles should have collided in the 400 s. # # This analysis illustrates why 1 NTU is a practical limit for flocculation. Assuming that we don't want to apply so much coagulant that the clay particles are completely covered with coagulant, then some fraction of the collisions will be ineffective. Thus at 1 NTU a Gtheta of 40,000 might only cause one successful collision. # # * 1 for correct cleared volume # * 1 for correct occupied volume # + id="NzbU8xbxERPC" colab_type="code" colab={} # + [markdown] id="dOkUG4liEc2w" colab_type="text" # # Flocculator design # # Below we design a flocculator using [ac.Flocculator](https://aguaclara.github.io/aguaclara/design/floc.html) in the aguaclara distribution version. 
We will use the default settings for this design except change the flow rate to 60 L/s. The available inputs (and their default values) that you can change are shown in the [documentation](https://aguaclara.github.io/aguaclara/design/floc.html). You can change any of these parameters by including their keywords in the function call. # # See the [current cad drawing of a flocculator and entrance tank](https://cad.onshape.com/documents/c3a8ce032e33ebe875b9aab4/w/de9ad5474448b34f33fef097/e/08f41d8bdd9a9c90ab396f8a). # + id="nl1iJikiEh_x" colab_type="code" outputId="88a90b63-7d18-46fb-8aed-a7909a8223f7" executionInfo={"status": "ok", "timestamp": 1569504425210, "user_tz": 240, "elapsed": 246, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDmYNDq6ij0468RSHe1goXE_t9gbSPdq5OAsU4-ejQ=s64", "userId": "08369668289863895493"}} colab={"base_uri": "https://localhost:8080/", "height": 204} flow=60 * u.L/u.s myF = ac.Flocculator(q=flow) print('The number of channels is', myF.chan_n) print('The channel length is',myF.chan_l) print('The channel width is',ac.round_sig_figs(myF.chan_w,2)) print('The spacing between baffles is',ac.round_sig_figs(myF.baffle_s,2)) print('The number of obstacles per baffle is', myF.obstacle_n) print('The velocity gradient is', ac.round_sig_figs(myF.vel_grad_avg,2)) print('The residence time (not counting the effect of head loss) is',ac.round_sig_figs(myF.retention_time,2)) print('The maximum distance between flow expansions is', ac.round_sig_figs(myF.expansion_h_max,2)) print('The drain diameter is', myF.drain_pipe.size) print('The Gt is',myF.gt) print('The length of the first channel occupied by the entrance tank is',myF.ent_l) # + [markdown] id="QJVXHh9uJc-h" colab_type="text" # ## Calculations and analysis # # ### 1) (2 points) # # How many expansions are there in total? Estimate this based on the spacing and flocculator size. You will have to account for the entrance tank that occupies volume in the first flocculator channel. # # * 2 points for correct answer # + id="YkiAXqGbJgfH" colab_type="code" colab={} # + [markdown] id="WCv6bN7AKQGL" colab_type="text" # ### 2) (2 points) # What is the head loss per expansion? (Calculate this head loss using the minor loss equation) You can use the BAFFLE_K that is defined in the flocculator class. # # * 1 point for correct velocity # * 1 point for correct headloss # + id="hakmCBhlKSpA" colab_type="code" colab={} # + [markdown] id="excnCsR7Kdfv" colab_type="text" # ### 3) (1 point) # What is the total head loss of all of the expansions? Compare this with the target head loss of 40 cm. # # * 1 point for correct answer # + id="TIdvFS75Kf7a" colab_type="code" colab={} # + [markdown] id="Ffp3QLYSKxOk" colab_type="text" # ### 4) (5 points) # Change the design temperature over a range that would be applicable in Ithaca (0 to 30 degC) for a flocculator design of your choice. What happens as the temperature increases? Plot the following: # * residence time # * velocity gradient # * baffle spacing # * number of channels # * channel width # # all as functions of temperature. Explain WHY these design changes occur. # # Hints: # * I suggest creating about 50 designs # * create a numpy array of flocculator objects. 
# ``` # MyFn = 50 # myFs =np.empty(MyFn, dtype=type(myF)) # ``` # * create empty numpy arrays with the correct units for each parameter that you want to plot # * use a single for loop to cycle through each design and extract the parameters that you want to plot from the flocculator objects (MyF) and place those values in the arrays that you created. # # Points # * 1 for each Graph # # Make sure that each graph has correct axis labels with units! # + id="5nYovZ_2K2Oy" colab_type="code" colab={} # + [markdown] id="xmJa3qG1Nf1Q" colab_type="text" # The water becomes more viscous as it gets colder. Thus it becomes more difficult to deform. Given that we are limiting the amount of energy that we are willing to use, we have to compensate by deforming the fluid more slowly. Thus if we hold the amount of energy available as a constant, then the velocity gradient decreases as the temperature decreases and the residence time increases. The number of channels increases as the temperature drops because the design ran up against the maximum channel width constraint as the flocculator volume increased. # + [markdown] id="PVXiojzANjXM" colab_type="text" # ### 5) (1 point) # When designing a flocculator how should you select the design temperature? # # * 1 point for correct answer # + [markdown] id="4m45FS6JNmeI" colab_type="text" # # + [markdown] id="0N9YaAjjIIMW" colab_type="text" # ### 6) (4 points) # Here at Cornell and in Honduras we have been experimenting with flocculators that have a $G\theta$ of 20,000 and a head loss of 50 cm for use in Honduras where the minimum temperature is about 15 $^\circ C$. Create a design for an entrance tank and flocculator with these inputs and flow rates of 10 L/s and 100 L/s. Previously in this design challenge you designed a flocculator. The AguaClara entrance tank is incorporated into the first channel of the flocculator. In object oriented programming this is handled by creating an object that is an [entrance tank flocculator assembly](https://aguaclara.github.io/aguaclara/design/ent_floc.html) that contains the entrance tank with an LFOM and the flocculator. This higher level assembly is able to optimize the width of the flocculator channels to best accommodate the flocculator and the entrance tank. # # Hints... # # * Place the two flows in an array and use a for loop to cycle through the two designs and print the design outputs. # * Create a design: `myfastetf = ac.EntTankFloc(q=myq, floc = ac.Flocculator(q=myq, gt=20000, temp=15 * u.degC, hl = 50 * u.cm))` # * At minimum you need to print the width and length of the flocculator! You might be curious about how other values have changed too. # * Notice how we can set the values for a sub assembly inside the list of inputs for the assembly. # # # Given these designs would you recommend that we change our plant layout to allow a single channel flocculator? # List as many design implications as you can think of for this potential change. Check out the [current cad drawing](https://cad.onshape.com/documents/c3a8ce032e33ebe875b9aab4/w/de9ad5474448b34f33fef097/e/08f41d8bdd9a9c90ab396f8a) and explain why the plan view area of the plant would change and identify what else would need to change if the flocculator only had one channel. How is the entrance tank drained to remove accumulated solids? Do these designs make sense? As of 9/26/2019 the channel width for the low flow was not constructable. 
# # * 2 for designs # * 2 for recommendation # + [markdown] id="WyrWyNMnQg9r" colab_type="text" # The entrance tank/flocculator would be less costly and the plan view area of the plant would be decreased because there wouldn't be so much wasted space at the end of the flocculator. # The entrance tank has a drain for solids that accumulate and now that drain would be far from the central drain channel. It isn't clear how that drain would be connected because it will contain large heavy solids that could easily clog a horizontal drain pipe. # + [markdown] id="3729SVJP8de0" colab_type="text" # # + [markdown] id="JkbUM5hHTW6w" colab_type="text" # # Musings on the design for a horizontal flow flocculator # # The calculations and musings below are to get you thinking about what happens as we design larger water treatment plants! # # The maximum flow for a vertical flow flocculator is... # # $$Q = \frac{W_{Max}H_e}{\Pi_{HS_{Min}} } \left( \frac{2 H_e g^2 h_e^2}{K(G\theta)^2 \nu} \right)^\frac{1}{3}$$ # + id="jscBzneZTcuu" colab_type="code" outputId="7f80e5c4-b289-45ae-b363-e88b72600a2b" executionInfo={"status": "ok", "timestamp": 1569503071086, "user_tz": 240, "elapsed": 8172, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDmYNDq6ij0468RSHe1goXE_t9gbSPdq5OAsU4-ejQ=s64", "userId": "08369668289863895493"}} colab={"base_uri": "https://localhost:8080/", "height": 34} W_Max = 1.08 * u.m K= 2.56 Ratio_H_S = 3 h_L = 40*u.cm H_Min = 2*u.m Gtheta = 3.7e4 T_Des = 15 * u.degC #Find General Design Parameters nu = ac.viscosity_kinematic(T_Des) Q = (W_Max * H_Min/Ratio_H_S * ((2*H_Min*u.gravity**2*h_L**2)/(K*Gtheta**2 * nu))**(1/3)).to(u.L/u.s) print('The maximum flow rate for a vertical flow flocculator is',Q) # + [markdown] id="vpJexw4Q5sSy" colab_type="text" # # The minimum flow for a horizontal flow flocculator is... # # # $$Q = W_{Min}S_{Min} \left( \frac{2 H_e g^2 h_e^2}{K(G\theta)^2 \nu} \right)^\frac{1}{3}$$ # # * Here W is the width of flow which is the depth of water. # * He is the distance between flow expansions which is the width of the channel # + id="nOzDscp6vIEa" colab_type="code" outputId="1993bd20-5aa8-40d3-d69b-cdddc49258f4" executionInfo={"status": "ok", "timestamp": 1569503071088, "user_tz": 240, "elapsed": 8168, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDmYNDq6ij0468RSHe1goXE_t9gbSPdq5OAsU4-ejQ=s64", "userId": "08369668289863895493"}} colab={"base_uri": "https://localhost:8080/", "height": 34} # Now find the minimum flow for a horizontal flow flocculator given the constraint that the spacing must be 0.5 m for the masons W_Min = 2*u.m S_Min = 0.5 * u.m H_Min = Ratio_H_S*S_Min Q = (W_Min * S_Min * ((2*H_Min*u.gravity**2*h_L**2)/(K*Gtheta**2 * nu))**(1/3)).to(u.L/u.s) print('The minimum flow rate for a horizontal flow flocculator is',Q) # + [markdown] id="XJvowwb27AK_" colab_type="text" # There is a gap in flow between our maximum vertical flow and minimum horizontal flow. We can bridge this gap by either increasing the depth of the vertical flow flocculator or decreasing the depth of the horizontal flow flocculator. We can solve the previous equation for W_Min given the flow rate required to find horizontal flow depth solutions that will work for flows between 180 and 220 L/s. # # We need to consult with the implementation partner to see what they would regard as the optimal depth for the flocculator tank. 
It is possible that we could increase the depth and provide an access ladder into the flocculator. This would allow us to minimize the plan view area required. # # Presumably the total wall area should be minimized to reduce construction costs. We need to determine if the minimum cost is at an H/S ratio of 3 or at an H/S ratio of 6. The optimal solution might depend on whether the walls that serve as baffles are equal or lower in cost to the walls used to create channels. This design process will need to include an exploration of possible materials and fabrication methods for the baffles. The baffles don't experience significant hydrostatic pressure and thus they must primarily be strong enough to resist being pushed by a human during construction or maintenance. # #
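
# As a follow-up to the note above about solving the horizontal-flow equation for the flow
# depth, the sketch below rearranges $Q = W S_{Min} \left( \frac{2 H_e g^2 h_e^2}{K(G\theta)^2 \nu} \right)^\frac{1}{3}$
# for the flow depth $W$ and evaluates it across the 180 to 220 L/s gap. This is only an
# illustrative calculation reusing the parameter values already defined in the cells above
# (K, Gtheta, h_L, S_Min, Ratio_H_S, nu); it is not part of the original design challenge.

# +
def horizontal_flow_depth(Q_target):
    """Flow depth W (the 'width of flow') needed to carry Q_target with baffle spacing S_Min."""
    H_e = Ratio_H_S * S_Min  # distance between flow expansions (the channel width)
    v_baffle = ((2 * H_e * u.gravity**2 * h_L**2) / (K * Gtheta**2 * nu))**(1 / 3)
    return (Q_target / (S_Min * v_baffle)).to(u.m)

for Q_target in [180 * u.L / u.s, 200 * u.L / u.s, 220 * u.L / u.s]:
    print('Q =', Q_target, '-> required flow depth W =', horizontal_flow_depth(Q_target))
# -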
DC/Floc_DC.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] deletable=true editable=true # # Self-Driving Car Engineer Nanodegree # # ## Deep Learning # # ## Project: Build a Traffic Sign Recognition Classifier # # In this notebook, a template is provided for you to implement your functionality in stages, which is required to successfully complete this project. If additional code is required that cannot be included in the notebook, be sure that the Python code is successfully imported and included in your submission if necessary. # # > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the iPython Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to \n", # "**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission. # # In addition to implementing code, there is a writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) that can be used to guide the writing process. Completing the code template and writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/481/view) for this project. # # The [rubric](https://review.udacity.com/#!/rubrics/481/view) contains "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. The stand out suggestions are optional. If you decide to pursue the "stand out suggestions", you can include the code in this Ipython notebook and also discuss the results in the writeup file. # # # >**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode. # + [markdown] deletable=true editable=true # --- # ## 1. Load The CIFAR10 Data # + deletable=true editable=true # Load pickled data import pickle from keras.datasets import cifar10 from sklearn.model_selection import train_test_split (X_train_temp, y_train_temp), (X_test, y_test) = cifar10.load_data() # y_train.shape is 2d, (50000, 1). While Keras is smart enough to handle this # it's a good idea to flatten the array. y_train_temp = y_train_temp.reshape(-1) y_test = y_test.reshape(-1) X_train, X_valid, y_train, y_valid = train_test_split(X_train_temp, y_train_temp, test_size=0.33, random_state=0) assert(len(X_train) == len(y_train)) assert(len(X_valid) == len(y_valid)) assert(len(X_test) == len(y_test)) print("Loading done!") # + [markdown] deletable=true editable=true # --- # # ## Step 1: Dataset Summary & Exploration # # The pickled data is a dictionary with 4 key/value pairs: # # - `'features'` is a 4D array containing raw pixel data of the traffic sign images, (num examples, width, height, channels). # - `'labels'` is a 1D array containing the label/class id of the traffic sign. The file `signnames.csv` contains id -> name mappings for each id. 
# - `'sizes'` is a list containing tuples, (width, height) representing the original width and height the image. # - `'coords'` is a list containing tuples, (x1, y1, x2, y2) representing coordinates of a bounding box around the sign in the image. **THESE COORDINATES ASSUME THE ORIGINAL IMAGE. THE PICKLED DATA CONTAINS RESIZED VERSIONS (32 by 32) OF THESE IMAGES** # # Complete the basic data summary below. Use python, numpy and/or pandas methods to calculate the data summary rather than hard coding the results. For example, the [pandas shape method](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.shape.html) might be useful for calculating some of the summary results. # + [markdown] deletable=true editable=true # ## 2. Provide a Basic Summary of the Data Set Using Python, Numpy and/or Pandas # + deletable=true editable=true ### Replace each question mark with the appropriate value. ### Use python, pandas or numpy methods rather than hard coding the results import numpy as np # Number of training examples n_train = len(X_train) # Number of testing examples. n_test = len(X_test) # Number of validation examples n_valid = len(X_valid) # TODO: What's the shape of an traffic sign image? image_shape = X_train[0].shape # TODO: How many unique classes/labels there are in the dataset. n_classes = np.unique(y_train).size print("Number of training examples =", n_train) print("Number of validation examples =", n_valid) print("Number of testing examples =", n_test) print("Image data shape =", image_shape) print("Number of classes =", n_classes) # + [markdown] deletable=true editable=true # ## 3. Include an exploratory visualization of the dataset # + [markdown] deletable=true editable=true # Visualize the German Traffic Signs Dataset using the pickled file(s). This is open ended, suggestions include: plotting traffic sign images, plotting the count of each sign, etc. # # The [Matplotlib](http://matplotlib.org/) [examples](http://matplotlib.org/examples/index.html) and [gallery](http://matplotlib.org/gallery.html) pages are a great resource for doing visualizations in Python. # # **NOTE:** It's recommended you start with something simple first. If you wish to do more, come back to it after you've completed the rest of the sections. # + deletable=true editable=true import matplotlib.pyplot as plt import random import numpy as np import csv import pandas as pd # Visualizations will be shown in the notebook. 
# %matplotlib inline def show_sample(features, labels, histogram = 1, sample_num = 1, sample_index = -1, color_map ='brg'): if histogram == 1 : col_num = 2 #Create training sample + histogram plot f, axarr = plt.subplots(sample_num+1, col_num, figsize=(col_num*4,(sample_num+1)*3)) else: if sample_num <= 4: col_num = sample_num else: col_num = 4 if sample_num%col_num == 0: row_num = int(sample_num/col_num) else: row_num = int(sample_num/col_num)+1 if sample_num == 1: #Create training sample plot f, ax = plt.subplots(row_num, col_num) else: #Create training sample plot f, axarr = plt.subplots(row_num, col_num, figsize=(col_num*4,(row_num+1)*2)) signnames = pd.read_csv('signnames.csv') index = sample_index - 1 for i in range(0, sample_num, 1): if sample_index < -1: index = random.randint(0, len(features)) else: index = index + 1 if histogram == 1 : image = features[index].squeeze() axarr[i,0].set_title('%s' % signnames.iloc[labels[index], 1]) axarr[i,0].imshow(image,color_map) hist,bins = np.histogram(image.flatten(),256, normed =1 ) cdf = hist.cumsum() cdf_normalized = cdf * hist.max()/ cdf.max() axarr[i,1].plot(cdf_normalized, color = 'b') axarr[i,1].hist(image.flatten(),256, normed =1, color = 'r') axarr[i,1].legend(('cdf','histogram'), loc = 'upper left') axarr[i,0].axis('off') axarr[sample_num,0].axis('off') axarr[sample_num,1].axis('off') else: image = features[index].squeeze() if row_num > 1: axarr[int(i/col_num),i%col_num].set_title('%s' % signnames.iloc[labels[index], 1]) axarr[int(i/col_num),i%col_num].imshow(image,color_map) axarr[int(i/col_num),i%col_num].axis('off') axarr[int(i/col_num),i%col_num].axis('off') axarr[int(i/col_num),i%col_num].axis('off') elif sample_num == 1: ax.set_title('%s' % signnames.iloc[labels[index], 1]) ax.imshow(image,color_map) ax.axis('off') ax.axis('off') ax.axis('off') else: axarr[i%col_num].set_title('%s' % signnames.iloc[labels[index], 1]) axarr[i%col_num].imshow(image,color_map) axarr[i%col_num].axis('off') axarr[i%col_num].axis('off') axarr[i%col_num].axis('off') # Tweak spacing to prevent clipping of title labels f.tight_layout() plt.show() def show_training_dataset_histogram(labels_train,labels_valid,labels_test): fig, ax = plt.subplots(figsize=(15,5)) temp = [labels_train,labels_valid,labels_test] n_classes = np.unique(y_train).size # the histogram of the training data n, bins, patches = ax.hist(temp, n_classes, label=["Train","Valid","Test"]) ax.set_xlabel('Classes') ax.set_ylabel('Number of occurence') ax.set_title(r'Histogram of the data sets') ax.legend(bbox_to_anchor=(1.01, 1), loc="upper left") plt.show() show_training_dataset_histogram(y_train,y_valid,y_test) show_sample(X_train, y_train, sample_num = 6) # + [markdown] deletable=true editable=true # ---- # # ## Step 2: Design and Test a Model Architecture # # Design and implement a deep learning model that learns to recognize traffic signs. Train and test your model on the [German Traffic Sign Dataset](http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset). # # The LeNet-5 implementation shown in the [classroom](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/601ae704-1035-4287-8b11-e2c2716217ad/concepts/d4aca031-508f-4e0b-b493-e7b706120f81) at the end of the CNN lesson is a solid starting point. You'll have to change the number of classes and possibly the preprocessing, but aside from that it's plug and play! 
# # With the LeNet-5 solution from the lecture, you should expect a validation set accuracy of about 0.89. To meet specifications, the validation set accuracy will need to be at least 0.93. It is possible to get an even higher accuracy, but 0.93 is the minimum for a successful project submission. # # There are various aspects to consider when thinking about this problem: # # - Neural network architecture (is the network over or underfitting?) # - Play around preprocessing techniques (normalization, rgb to grayscale, etc) # - Number of examples per label (some have more than others). # - Generate fake data. # # Here is an example of a [published baseline model on this problem](http://yann.lecun.com/exdb/publis/pdf/sermanet-ijcnn-11.pdf). It's not required to be familiar with the approach used in the paper but, it's good practice to try to read papers like these. # + [markdown] deletable=true editable=true # ## 4. Augment the Data Set # + deletable=true editable=true import cv2 from tqdm import tqdm from sklearn.utils import shuffle def random_transform_image(dataset, index): # Hyperparameters # Values inspired from <NAME> and <NAME> Paper : Traffic Sign Recognition with Multi-Scale Convolutional Networks Scale_change_max = 0.1 Translation_max = 2 #pixels Rotation_max = 15 #degrees Brightness_max = 0.1 # Generate random transformation values trans_x = np.random.uniform(-Translation_max,Translation_max) trans_y = np.random.uniform(-Translation_max,Translation_max) angle = np.random.uniform(-Rotation_max,Rotation_max) scale = np.random.uniform(1-Scale_change_max,1+Scale_change_max) bright = np.random.uniform(-Brightness_max,Brightness_max) #Brightness #create white image white_img = 255*np.ones((32,32,3), np.uint8) black_img = np.zeros((32,32,3), np.uint8) if bright >= 0: img = cv2.addWeighted(dataset[index].squeeze(),1-bright,white_img,bright,0) else: img = cv2.addWeighted(dataset[index].squeeze(),bright+1,black_img,bright*-1,0) # Scale img = cv2.resize(img,None,fx=scale, fy=scale, interpolation = cv2.INTER_CUBIC) # Get image shape afeter scaling rows,cols,chan = img.shape # Pad with zeroes before rotation if image shape is less than 32*32*3 if rows < 32: offset = int((32-img.shape[0])/2) # If shape is an even number if img.shape[0] %2 == 0: img = cv2.copyMakeBorder(img,offset,offset,offset,offset,cv2.BORDER_CONSTANT,value=[0,0,0]) else: img = cv2.copyMakeBorder(img,offset,offset+1,offset+1,offset,cv2.BORDER_CONSTANT,value=[0,0,0]) # Update image shape after padding rows,cols,chan = img.shape # Rotate M = cv2.getRotationMatrix2D((cols/2,rows/2),angle,1) img = cv2.warpAffine(img,M,(cols,rows)) # Translation M = np.float32([[1,0,trans_x],[0,1,trans_y]]) img = cv2.warpAffine(img,M,(cols,rows)) # Crop centered if image shape is greater than 32*32*3 if rows > 32: offset = int((img.shape[0]-32)/2) img = img[offset: 32 + offset, offset: 32 + offset] return img # Parameters # Max example number per class num_example_per_class = np.bincount(y_train) min_example_num = max(num_example_per_class) for i in range(len(num_example_per_class)): # Update number of examples by class num_example_per_class = np.bincount(y_train) # If the class lacks examples... if num_example_per_class[i] < min_example_num: # Locate where pictures of this class are located in the training set.. 
pictures = np.array(np.where(y_train == i)).T # Compute the number of pictures to be generated num_example_to_generate = min_example_num - num_example_per_class[i] # Compute the number of iteration necessary on the real data num_iter = int( num_example_to_generate/len(pictures) ) + 1 # Compute the pool of real data necessary to fill the classes if num_iter == 1 : num_pictures = num_example_to_generate else: num_pictures = len(pictures) # # Limit the number of iteration to 10 # num_iter = min(num_iter, 10) # Create empty list more_X = [] more_y = [] for k in range(num_iter): # if we are in the last iteration, num_pictures is adjusted to fit the min_example_num if (k == num_iter - 1) and (num_iter > 1): num_pictures = min_example_num - num_iter * len(pictures) # For each pictures of this class, generate 1 more synthetic image pbar = tqdm(range(num_pictures), desc='Iter {:>2}/{}'.format(i+1, len(num_example_per_class)), unit='examples') for j in pbar: # Append the transformed picture more_X.append(random_transform_image(X_train,pictures[j])) # Append the class number more_y.append(i) # Append the synthetic images to the training set X_train = np.append(X_train, np.array(more_X), axis=0) y_train = np.append(y_train, np.array(more_y), axis=0) print("New training feature shape",X_train.shape) print("New training label shape",y_train.shape) print("Data augmentation done!") # + [markdown] deletable=true editable=true # ## 5. Show a sample of the augmented dataset # + deletable=true editable=true # Visualization show_training_dataset_histogram(y_train,y_valid,y_test) show_sample(X_train, y_train, histogram = 0, sample_num = 8, sample_index = 35000) # + [markdown] deletable=true editable=true # ## 6. Pre-process functions # + deletable=true editable=true import cv2 from numpy import newaxis def equalize_Y_histogram(features): images = [] for image in features: # Convert RGB to YUV temp = cv2.cvtColor(image, cv2.COLOR_BGR2YUV); # Equalize Y histogram in order to get better contrast accross the dataset temp[:,:,0] = cv2.equalizeHist(temp[:,:,0]) # Convert back YUV to RGB temp = cv2.cvtColor(temp, cv2.COLOR_YUV2BGR) images.append(temp) return np.array(images) def CLAHE_contrast_normalization(features): images = [] for image in features: # create a CLAHE object clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(4,4)) temp = clahe.apply(image) images.append(temp) return np.array(images) def convert_to_grayscale(features): gray_images = [] for image in features: # Convert RGB to grayscale temp = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) gray_images.append(temp) return np.array(gray_images) def normalize_grayscale(image_data): """ Normalize the image data with Min-Max scaling to a range of [0.1, 0.9] :param image_data: The image data to be normalized :return: Normalized image data """ a = 0.1 b = 0.9 image_data_norm = a + ((image_data - np.amin(image_data))*(b-a))/(np.amax(image_data) - np.amin(image_data)) return image_data_norm # + [markdown] deletable=true editable=true # ## 7. 
Show a sample of the preprocess functions outputs # + deletable=true editable=true index = 255 X_temp1 = convert_to_grayscale(X_train) X_temp2 = CLAHE_contrast_normalization(X_temp1) X_temp3 = normalize_grayscale(X_temp2) show_sample(X_train, y_train, histogram = 1, sample_num = 1, sample_index = index) show_sample(X_temp1, y_train, histogram = 1, sample_num = 1, sample_index = index, color_map ='gray') show_sample(X_temp2, y_train, histogram = 1, sample_num = 1, sample_index = index, color_map ='gray') print(X_temp2[index]) print(X_temp3[index]) # + [markdown] deletable=true editable=true # ## 8. Preprocess the Dataset # + deletable=true editable=true #Preprocessing pipeline print('Preprocessing training features...') X_train = convert_to_grayscale(X_train) X_train = CLAHE_contrast_normalization(X_train) X_train = normalize_grayscale(X_train) X_train = X_train[..., newaxis] print("Processed shape =", X_train.shape) print('Preprocessing validation features...') X_valid = convert_to_grayscale(X_valid) X_valid = CLAHE_contrast_normalization(X_valid) X_valid = normalize_grayscale(X_valid) X_valid = X_valid[..., newaxis] print("Processed shape =", X_valid.shape) print('Preprocessing test features...') X_test = convert_to_grayscale(X_test) X_test = CLAHE_contrast_normalization(X_test) X_test = normalize_grayscale(X_test) X_test = X_test[..., newaxis] print("Processed shape =", X_test.shape) # Shuffle the training dataset X_train, y_train = shuffle(X_train, y_train) print("Pre-processing done!") # + [markdown] deletable=true editable=true # ## 9. Model Architecture # # [//]: # (Image References) # [image1]: ./examples/architecture.png "Conv Net Architecture" # # ![alt text][image1] # # | Layer | Description | Input | Output | # |:-------------:|:---------------------------------------------:|:-----------------:|:---------------------------:| # | Input | 32x32x1 Grayscale image | Image | Convolution 1 | # | Convolution 1 | 1x1 stride, valid padding, outputs 28x28x100 | Input | RELU | # | RELU 1 | | Convolution 1 | Max Pooling 1 | # | Max pooling 1 | 2x2 stride, outputs 14x14x100 | RELU 1 | Convolution 2, Max Pooling 3| # | Convolution 2 | 1x1 stride, valid padding, outputs 10x10x200 | Max pooling 1 | RELU 2 | # | RELU 2 | | Convolution 2 | Max pooling 2 | # | Max pooling 2 | 2x2 stride, outputs 5x5x200 | RELU 2 | Flatten 2 | # | Max pooling 3 | 2x2 stride, outputs 7x7x100 | Max pooling 1 | Flatten 1 | # | Flatten 1 | Input = 7x7x100, Output = 4900 | Max pooling 3 | Concatenate 1 | # | Flatten 2 | Input = 5x5x200, Output = 5000 | Max pooling 2 | Concatenate 1 | # | Concatenate 1 | Input1 = 4900, Input1 = 5000, Output = 9900 | Max pooling 2 and 3 |Fully connected | # | Fully connected | Fully Connected. Input = 9900, Output = 100 | Concatenate 1 | Dropout | # | Dropout | Keep prob = 0.75 | Fully connected | Softmax | # | Softmax | Fully Connected. 
Input = 100, Output = 43 | Dropout | Probabilities | # + deletable=true editable=true import tensorflow as tf from tensorflow.contrib.layers import flatten def model(x, keep_prob): # Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer mu = 0 sigma = 0.1 # Network Parameters n_classes = 10 # MNIST total classes (0-9 digits) filter_size = 5 # Store layers weight & bias weights = { 'wc1' : tf.Variable(tf.truncated_normal([filter_size, filter_size, 1, 100], mean = mu, stddev = sigma)), 'wc2' : tf.Variable(tf.truncated_normal([filter_size, filter_size, 100, 200], mean = mu, stddev = sigma)), 'wfc1': tf.Variable(tf.truncated_normal([9900, 100], mean = mu, stddev = sigma)), 'out' : tf.Variable(tf.truncated_normal([100, n_classes], mean = mu, stddev = sigma))} biases = { 'bc1' : tf.Variable(tf.zeros([100])), 'bc2' : tf.Variable(tf.zeros([200])), 'bfc1': tf.Variable(tf.zeros([100])), 'out' : tf.Variable(tf.zeros([n_classes]))} def conv2d(x, W, b, strides=1., padding='SAME'): x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding=padding) x = tf.nn.bias_add(x, b) return tf.nn.relu(x) def maxpool2d(x, k=2, padding='SAME'): return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding=padding) # Layer 1: Convolution 1 - 32*32*1 to 28*28*100 conv1 = conv2d(x, weights['wc1'], biases['bc1'], padding='VALID') # Max Pool - 28*28*100 to 14*14*100 conv1 = maxpool2d(conv1, k=2) # Layer 2: Convolution 2 - 14*14*100 to 10*10*200 conv2 = conv2d(conv1, weights['wc2'], biases['bc2'], padding='VALID') # Max Pool - 10*10*200 to 5*5*200 conv2 = maxpool2d(conv2, k=2) #Fork second max pool - 14*14*100 to 7*7*100 conv1 = maxpool2d(conv1, k=2) #Flatten conv1. Input = 7*7*100, Output = 4900 conv1 = tf.contrib.layers.flatten(conv1) # Flatten conv2. Input = 5x5x200. Output = 5000. conv2 = tf.contrib.layers.flatten(conv2) # Concatenate flat = tf.concat(1,[conv1,conv2]) # Layer 3 : Fully Connected. Input = 9900. Output = 100. fc1 = tf.add(tf.matmul(flat, weights['wfc1']), biases['bfc1']) fc1 = tf.nn.relu(fc1) fc1 = tf.nn.dropout(fc1, keep_prob) # Layer 4: Fully Connected. Input = 100. Output = n_classes. logits = tf.add(tf.matmul(fc1, weights['out']), biases['out']) return logits # + [markdown] deletable=true editable=true # ## 10. Train, Validate and Test the Model # + [markdown] deletable=true editable=true # A validation set can be used to assess how well the model is performing. A low accuracy on the training and validation # sets imply underfitting. A high accuracy on the training set but low accuracy on the validation set implies overfitting. # + deletable=true editable=true ### Train your model here. ### Calculate and report the accuracy on the training and validation set. ### Once a final model architecture is selected, ### the accuracy on the test set should be calculated and reported as well. ### Feel free to use as many code cells as needed. #Hyperparameters EPOCHS = 100 #Max EPOCH number, if ever early stopping doesn't kick in BATCH_SIZE = 256 #Max batch size rate = 0.001 #Base learning rate keep_probability = 0.75 #Keep probability for dropout.. max_iter_wo_improvmnt = 3000 #For early stopping # + [markdown] deletable=true editable=true # ## 11. Features and Labels # # `x` is a placeholder for a batch of input images. # `y` is a placeholder for a batch of output labels. 
# + deletable=true editable=true #Declare placeholder tensors x = tf.placeholder(tf.float32, (None, 32, 32, 1)) y = tf.placeholder(tf.int32, (None)) keep_prob = tf.placeholder(tf.float32) one_hot_y = tf.one_hot(y, n_classes) # + [markdown] deletable=true editable=true # ## 12. Training Pipeline # Create a training pipeline that uses the model to classify German Traffic Sign Benchmarks data. # + deletable=true editable=true logits = model(x, keep_prob) probabilities = tf.nn.softmax(logits) cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, one_hot_y) loss_operation = tf.reduce_mean(cross_entropy) optimizer = tf.train.AdamOptimizer(learning_rate = rate) training_operation = optimizer.minimize(loss_operation) # + [markdown] deletable=true editable=true # ## 13. Model Evaluation # Evaluate how well the loss and accuracy of the model for a given dataset. # + deletable=true editable=true correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1)) accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) saver = tf.train.Saver() def evaluate(X_data, y_data): num_examples = len(X_data) total_accuracy = 0 sess = tf.get_default_session() for offset in range(0, num_examples, BATCH_SIZE): batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE] accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y, keep_prob: 1.0}) total_accuracy += (accuracy * len(batch_x)) return total_accuracy / num_examples # + [markdown] deletable=true editable=true # ## 14. Train the Model # Run the training data through the training pipeline to train the model. # # Before each epoch, shuffle the training set. # # After each epoch, measure the loss and accuracy of the validation set. # # Save the model after training. # + deletable=true editable=true from sklearn.utils import shuffle with tf.Session() as sess: sess.run(tf.global_variables_initializer()) num_examples = len(X_train) # Max iteration number without improvement max_interation_num_wo_improv = 1000 print("Training...") iteration = 0 best_valid_accuracy = 0 best_accuracy_iter = 0 stop = 0 print() for i in range(EPOCHS): X_train, y_train = shuffle(X_train, y_train) for offset in range(0, num_examples, BATCH_SIZE): iteration = iteration + 1 end = offset + BATCH_SIZE batch_x, batch_y = X_train[offset:end], y_train[offset:end] sess.run(training_operation, feed_dict={x: batch_x, y: batch_y, keep_prob: keep_probability}) # After 10 Epochs, for every 200 iterations validation accuracy is checked if (iteration % 200 == 0 and i > 10): validation_accuracy = evaluate(X_valid, y_valid) if validation_accuracy > best_valid_accuracy: best_valid_accuracy = validation_accuracy best_accuracy_iter = iteration saver = tf.train.Saver() saver.save(sess, './best_model') print("Improvement found, model saved!") stop = 0 # Stopping criteria : if not improvement since 1000 iterations stop training if (iteration - best_accuracy_iter) > max_iter_wo_improvmnt: print("Stopping criteria met..") stop = 1 validation_accuracy = evaluate(X_valid, y_valid) print("EPOCH {} ...".format(i+1)) print("Validation Accuracy = {:.3f}".format(validation_accuracy)) print() if stop == 1: break # saver.save(sess, './lenet') # print("Model saved") # + [markdown] deletable=true editable=true # ## 15. Evaluate accuracy of the different data sets # + deletable=true editable=true ### Load the images and plot them here. ### Feel free to use as many code cells as needed. 
with tf.Session() as sess: saver.restore(sess, tf.train.latest_checkpoint('.')) print("Evaluating..") train_accuracy = evaluate(X_train, y_train) print("Train Accuracy = {:.3f}".format(train_accuracy)) valid_accuracy = evaluate(X_valid, y_valid) print("Valid Accuracy = {:.3f}".format(valid_accuracy)) test_accuracy = evaluate(X_test, y_test) print("Test Accuracy = {:.3f}".format(test_accuracy)) # + [markdown] deletable=true editable=true # --- # # ## Step 3: Test a Model on New Images # # To give yourself more insight into how your model is working, download at least five pictures of German traffic signs from the web and use your model to predict the traffic sign type. # # You may find `signnames.csv` useful as it contains mappings from the class id (integer) to the actual sign name. # + [markdown] deletable=true editable=true # ## 16. Load and Show the Images # + deletable=true editable=true import matplotlib.pyplot as plt import matplotlib.image as mpimg import os test_images = os.listdir('traffic-signs-data/web_found_signs/') X_web = [] for file in test_images: image = mpimg.imread('traffic-signs-data/web_found_signs/' + file) plt.imshow(image) plt.show() print("Loaded ", file) X_web.append(image) X_web = np.array(X_web) # Preprocess images print('Preprocessing features...') X_web = equalize_Y_histogram(X_web) X_web = convert_to_grayscale(X_web) X_web = normalize_grayscale(X_web) X_web = X_web[..., newaxis] print("Processed shape =", X_web.shape) # + [markdown] deletable=true editable=true # ## 17. Predict the Sign Type for Each Image # + deletable=true editable=true ### Run the predictions here and use the model to output the prediction for each image. ### Make sure to pre-process the images with the same pre-processing pipeline used earlier. ### Feel free to use as many code cells as needed. import tensorflow as tf # hardcoded.. y_web = [9,22,2,18,1,17,4,10,38,4,4,23] #We have to set the keep probability to 1.0 in the model.. with tf.Session() as sess: saver.restore(sess, tf.train.latest_checkpoint('.')) logits_web = sess.run(tf.argmax(logits,1), feed_dict={x: X_web, keep_prob: 1.0}) print("Prediction =", logits_web) # show_sample(X_web, logits_web, histogram = 0, sample_num = len(test_images), sample_index = 0, color_map = 'gray') #Number of column to show sample_num = len(test_images) col_num = 4 if sample_num%col_num == 0: row_num = int(sample_num/col_num) else: row_num = int(sample_num/col_num)+1 #Create training sample plot f, axarr = plt.subplots(row_num, col_num, figsize=(col_num*4,(row_num+1)*2)) signnames = pd.read_csv('signnames.csv') for i in range(0, sample_num, 1): image = X_web[i].squeeze() if logits_web[i] != y_web[i]: color_str = 'red' else: color_str = 'green' title_str = 'Predicted : %s \n Real: %s' % (signnames.iloc[logits_web[i], 1],signnames.iloc[y_web[i], 1]) axarr[int(i/col_num),i%col_num].set_title(title_str, color = color_str) axarr[int(i/col_num),i%col_num].imshow(image,'gray') axarr[int(i/col_num),i%col_num].axis('off') axarr[int(i/col_num),i%col_num].axis('off') axarr[int(i/col_num),i%col_num].axis('off') f.tight_layout() plt.show() # + [markdown] deletable=true editable=true # ## 18. Analyze Performance # + deletable=true editable=true ### Calculate the accuracy for these 5 new images. with tf.Session() as sess: saver.restore(sess, tf.train.latest_checkpoint('.')) test_accuracy = evaluate(X_web, y_web) print("Web images Accuracy = {:.3f}".format(test_accuracy)) # + [markdown] deletable=true editable=true # ## 19. 
Output Top 5 Softmax Probabilities For Each Image Found on the Web # + [markdown] deletable=true editable=true # For each of the new images, print out the model's softmax probabilities to show the **certainty** of the model's predictions (limit the output to the top 5 probabilities for each image). [`tf.nn.top_k`](https://www.tensorflow.org/versions/r0.12/api_docs/python/nn.html#top_k) could prove helpful here. # # The example below demonstrates how tf.nn.top_k can be used to find the top k predictions for each image. # # `tf.nn.top_k` will return the values and indices (class ids) of the top k predictions. So if k=3, for each sign, it'll return the 3 largest probabilities (out of a possible 43) and the correspoding class ids. # # Take this numpy array as an example. The values in the array represent predictions. The array contains softmax probabilities for five candidate images with six possible classes. `tk.nn.top_k` is used to choose the three classes with the highest probability: # # ``` # # (5, 6) array # a = np.array([[ 0.24879643, 0.07032244, 0.12641572, 0.34763842, 0.07893497, # 0.12789202], # [ 0.28086119, 0.27569815, 0.08594638, 0.0178669 , 0.18063401, # 0.15899337], # [ 0.26076848, 0.23664738, 0.08020603, 0.07001922, 0.1134371 , # 0.23892179], # [ 0.11943333, 0.29198961, 0.02605103, 0.26234032, 0.1351348 , # 0.16505091], # [ 0.09561176, 0.34396535, 0.0643941 , 0.16240774, 0.24206137, # 0.09155967]]) # ``` # # Running it through `sess.run(tf.nn.top_k(tf.constant(a), k=3))` produces: # # ``` # TopKV2(values=array([[ 0.34763842, 0.24879643, 0.12789202], # [ 0.28086119, 0.27569815, 0.18063401], # [ 0.26076848, 0.23892179, 0.23664738], # [ 0.29198961, 0.26234032, 0.16505091], # [ 0.34396535, 0.24206137, 0.16240774]]), indices=array([[3, 0, 5], # [0, 1, 4], # [0, 5, 1], # [1, 3, 5], # [1, 4, 3]], dtype=int32)) # ``` # # Looking just at the first row we get `[ 0.34763842, 0.24879643, 0.12789202]`, you can confirm these are the 3 largest probabilities in `a`. You'll also notice `[3, 0, 5]` are the corresponding indices. # + deletable=true editable=true ### Print out the top five softmax probabilities for the predictions on the German traffic sign images found on the web. ### Feel free to use as many code cells as needed. import matplotlib.gridspec as gridspec with tf.Session() as sess: saver.restore(sess, tf.train.latest_checkpoint('.')) softmax_prob = sess.run(tf.nn.top_k(probabilities,k = 5), feed_dict={x: X_web, keep_prob: 1.0}) signnames = pd.read_csv('signnames.csv') for i in range(len(test_images)): plt.figure(figsize = (6,2)) gs = gridspec.GridSpec(1, 2,width_ratios=[2,3]) plt.subplot(gs[0]) plt.imshow(X_web[i].squeeze(),cmap="gray") plt.axis('off') plt.subplot(gs[1]) plt.barh(6-np.arange(5),softmax_prob[0][i], align='center') if logits_web[i] != y_web[i]: color_str = 'red' else: color_str = 'green' for i_label in range(5): temp_string = "%.1f %% : %s" % (softmax_prob[0][i][i_label]*100, str(signnames.iloc[softmax_prob[1][i][i_label], 1])) plt.text(softmax_prob[0][i][0]*1.1,6-i_label-.15, temp_string, color = color_str) plt.show() # + [markdown] deletable=true editable=true # --- # # ## Step 4: Visualize the Neural Network's State with Test Images # # This Section is not required to complete but acts as an additional excersise for understaning the output of a neural network's weights. While neural networks can be a great learning device they are often referred to as a black box. 
We can understand what the weights of a neural network look like better by plotting their feature maps. After successfully training your neural network you can see what it's feature maps look like by plotting the output of the network's weight layers in response to a test stimuli image. From these plotted feature maps, it's possible to see what characteristics of an image the network finds interesting. For a sign, maybe the inner network feature maps react with high activation to the sign's boundary outline or to the contrast in the sign's painted symbol. # # Provided for you below is the function code that allows you to get the visualization output of any tensorflow weight layer you want. The inputs to the function should be a stimuli image, one used during training or a new one you provided, and then the tensorflow variable name that represents the layer's state during the training process, for instance if you wanted to see what the [LeNet lab's](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/601ae704-1035-4287-8b11-e2c2716217ad/concepts/d4aca031-508f-4e0b-b493-e7b706120f81) feature maps looked like for it's second convolutional layer you could enter conv2 as the tf_activation variable. # # For an example of what feature map outputs look like, check out NVIDIA's results in their paper [End-to-End Deep Learning for Self-Driving Cars](https://devblogs.nvidia.com/parallelforall/deep-learning-self-driving-cars/) in the section Visualization of internal CNN State. NVIDIA was able to show that their network's inner weights had high activations to road boundary lines by comparing feature maps from an image with a clear path to one without. Try experimenting with a similar test to show that your trained network's weights are looking for interesting features, whether it's looking at differences in feature maps from images with or without a sign, or even what feature maps look like in a trained network vs a completely untrained one on the same sign image. # # <figure> # <img src="visualize_cnn.png" width="380" alt="Combined Image" /> # <figcaption> # <p></p> # <p style="text-align: center;"> Your output should look something like this (above)</p> # </figcaption> # </figure> # <p></p> # # + deletable=true editable=true ### Visualize your network's feature maps here. ### Feel free to use as many code cells as needed. 
# image_input: the test image being fed into the network to produce the feature maps # tf_activation: should be a tf variable name used during your training procedure that represents the calculated state of a specific weight layer # activation_min/max: can be used to view the activation contrast in more detail, by default matplot sets min and max to the actual min and max values of the output # plt_num: used to plot out multiple different weight feature map sets on the same block, just extend the plt number for each new feature map entry def outputFeatureMap(image_input, tf_activation, activation_min=-1, activation_max=-1 ,plt_num=1): # Here make sure to preprocess your image_input in a way your network expects # with size, normalization, ect if needed # image_input = # Note: x should be the same name as your network's tensorflow data placeholder variable # If you get an error tf_activation is not defined it maybe having trouble accessing the variable from inside a function activation = tf_activation.eval(session=sess,feed_dict={x : image_input}) featuremaps = activation.shape[3] plt.figure(plt_num, figsize=(15,15)) for featuremap in range(featuremaps): plt.subplot(6,8, featuremap+1) # sets the number of feature maps to show on each row and column plt.title('FeatureMap ' + str(featuremap)) # displays the feature map number if activation_min != -1 & activation_max != -1: plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmin =activation_min, vmax=activation_max, cmap="gray") elif activation_max != -1: plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmax=activation_max, cmap="gray") elif activation_min !=-1: plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmin=activation_min, cmap="gray") else: plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", cmap="gray") # + [markdown] deletable=true editable=true # ### Question 9 # # Discuss how you used the visual output of your trained network's feature maps to show that it had learned to look for interesting characteristics in traffic sign images # # + [markdown] deletable=true editable=true # **Answer:** # + [markdown] deletable=true editable=true # > **Note**: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to \n", # "**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission. # + [markdown] deletable=true editable=true # ### Project Writeup # # Once you have completed the code implementation, document your results in a project writeup using this [template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) as a guide. The writeup can be in a markdown or pdf file.
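
# ### Appendix: calling `outputFeatureMap`
#
# The helper above needs a TensorFlow tensor for the layer to be inspected, but `model()` in
# this notebook never exposes its intermediate convolution tensors. The cell below is only an
# illustrative sketch: it wires a throwaway, untrained 5x5 convolution onto the input
# placeholder `x` purely to show how `outputFeatureMap` is called. To visualise the *trained*
# network, `model()` would first have to be modified to return (or store) its conv activations,
# and that tensor would be passed in place of `demo_activation`.

# +
with tf.Session() as sess:
    # Throwaway convolution with random (untrained) weights -- only for demonstrating the helper.
    demo_activation = tf.nn.relu(tf.nn.conv2d(
        x, tf.truncated_normal([5, 5, 1, 8], stddev=0.1),
        strides=[1, 1, 1, 1], padding='VALID'))
    # Feed a single preprocessed test image (keeping the batch dimension).
    outputFeatureMap(X_test[0:1], demo_activation, plt_num=1)
# -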
CarND-Traffic-Sign-Classifier-P2/Traffic_Sign_ClassifierCIfar10.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <i>How would you define Machine Learning?</i>

# <i>Can you name four different types of problems where it shines?</i>

# <i>What is a labeled training set?</i>

# <i>What are the two most common supervised tasks?</i>

# <i>Can you name four common unsupervised tasks?</i>

# <i>What type of Machine Learning algorithm would you use to allow a robot to walk in various unknown terrains?</i>

# <i>What type of algorithm would you use to segment your customers into multiple groups?</i>

# <i>Would you frame the problem of spam detection as a supervised learning problem or an unsupervised learning problem?</i>

# <i>What is an online learning system?</i>

# <i>What is out-of-core learning?</i>

# <i>What type of learning algorithm relies on a similarity measure to make predictions?</i>

# <i>What is the difference between a model parameter and a learning algorithm's hyperparameter?</i>

# <i>What do model-based learning algorithms search for? What is the most common strategy they use to succeed? How do they make predictions?</i>

# <i>Can you name four of the main challenges in Machine Learning?</i>

# <i>If your model performs great on the training data but generalizes poorly to new instances, what is happening? Can you name three possible solutions?</i>

# <i>What is a test set and why would you want to use it?</i>

# <i>What is the purpose of a validation set?</i>

# <i>What can go wrong if you tune hyperparameters using the test set?</i>

# <i>What is cross-validation and why would you prefer it to a validation set?</i>
Hands-On Machine Learning with Scikit-Learn & TensorFlow/Cap.01 - The Fundamentals of Machine Learning.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Using autograd to calculate the gradient of a log-likelihood # # It is straightforward to use the automatic differentiation library [autograd](https://github.com/HIPS/autograd) to take the derivative of log-likelihoods defined in pints. Below is an example of how to do this. # # WARNING: We currently find this method of caculating model sensitivities to be quite slow for most time-series models, and so do not recommended it for use. # + import matplotlib.pyplot as plt import pints import pints.toy as toy import numpy as np import warnings try: import autograd.numpy as np from autograd.scipy.integrate import odeint from autograd.builtins import tuple from autograd import grad except ImportError: print("""This example requires autograd, which is not a pints dependency. If you see this warning, try `pip install autograd`""") exit(0) from timeit import repeat # - # We begin be defining a model, identical to the [Fitzhugh Nagumo](https://pints.readthedocs.io/en/latest/toy/fitzhugh_nagumo_model.html) toy model implemented in pints. The corresponding toy model in pints has its `evaluateS1()` method defined, so we can compare the results using automatic differentiation. class AutoGradFitzhughNagumoModel(pints.ForwardModel): def simulate(self, parameters, times): y0 = np.array([-1, 1], dtype=float) def rhs(y, t, p): V, R = y a, b, c = p dV_dt = (V - V**3 / 3 + R) * c dR_dt = (V - a + b * R) / -c return np.array([dV_dt, dR_dt]) return odeint(rhs, y0, times, tuple((parameters,))) def n_parameters(self): return 3 def n_outputs(self): return 2 # Now we wrap an existing pints likelihood class, and use the `autograd.grad` function to calculate the gradient of the given log-likelihood # + class AutoGradLogLikelihood(pints.ProblemLogLikelihood): def __init__(self, likelihood): self.likelihood = likelihood f = lambda x: self.likelihood(x) self.likelihood_grad = grad(f) def __call__(self, x): return self.likelihood(x) def evaluateS1(self, x): values = self.likelihood(x) gradient = self.likelihood_grad(x) return values, gradient def n_parameters(self): return self.likelihood.n_parameters() autograd_model = AutoGradFitzhughNagumoModel() pints_model = pints.toy.FitzhughNagumoModel() # - # Now create some toy data and ensure that the new model gives the same output as the toy model in pints # + # Create some toy data real_parameters = np.array(pints_model.suggested_parameters(), dtype='float64') times = pints_model.suggested_times() pints_values = pints_model.simulate(real_parameters, times) autograd_values = autograd_model.simulate(real_parameters, times) plt.figure() plt.plot(times, autograd_values) plt.plot(times, pints_values) plt.show() # - # Add some noise to the values, and then create log-likelihoods using both the new model, and the pints model # + noise = 0.1 values = pints_values + np.random.normal(0, noise, pints_values.shape) # Create an object with links to the model and time series autograd_problem = pints.MultiOutputProblem(autograd_model, times, values) pints_problem = pints.MultiOutputProblem(pints_model, times, values) # Create a log-likelihood function autograd_log_likelihood = pints.GaussianKnownSigmaLogLikelihood(autograd_problem, noise) autograd_likelihood = AutoGradLogLikelihood(autograd_log_likelihood) pints_log_likelihood = 
pints.GaussianKnownSigmaLogLikelihood(pints_problem, noise)
# -

# We can calculate the gradients of both likelihood functions at the given parameters to make sure that they are the same

autograd_likelihood.evaluateS1(real_parameters)

pints_log_likelihood.evaluateS1(real_parameters)

# Now we'll time both functions. You can see that the function using `autograd` is significantly slower than the in-built `evaluateS1` function for the pints model. For reference, the pints implementation uses forward-mode sensitivity calculation with the symbolic Jacobian of the model.

# +
statement = 'autograd_likelihood.evaluateS1(real_parameters)'
setup = 'from __main__ import autograd_likelihood, real_parameters'
time_taken = min(repeat(stmt=statement, setup=setup, number=1, repeat=5))
'Elapsed time: {:.0f} ms'.format(1000. * time_taken)

# +
statement = 'pints_log_likelihood.evaluateS1(real_parameters)'
setup = 'from __main__ import pints_log_likelihood, real_parameters'
time_taken = min(repeat(stmt=statement, setup=setup, number=1, repeat=5))
'Elapsed time: {:.0f} ms'.format(1000. * time_taken)
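
# As a minimal, self-contained illustration of the wrapping pattern used by `AutoGradLogLikelihood`
# above: `autograd.grad` turns any scalar function written with `autograd.numpy` into a function
# that returns its gradient. The toy Gaussian log-likelihood below is just an example and is not
# part of the pints API.

# +
import autograd.numpy as anp
from autograd import grad

def toy_log_likelihood(theta):
    # iid Gaussian log-likelihood (up to a constant) with unit variance and a small fixed data set
    data = anp.array([0.9, 1.1, 1.3])
    return -0.5 * anp.sum((data - theta[0]) ** 2)

toy_gradient = grad(toy_log_likelihood)
print(toy_log_likelihood(anp.array([1.0])), toy_gradient(anp.array([1.0])))
# -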
examples/toy/automatic-differentiation-using-autograd.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Processing data from GHTorrent # # ### PLEASE DO NOT RUN THIS CODE - ONLY for reproduceability # The processed repos are already saved under *data/ghtorrent_python_notdeleted_repos_updated2019* # # We process the mysql files from GHTorrent using a python script. # # We get only the following tables: # - projects # - project_languages # - repo_labels # - watchers # # we get all repositories from the table 'projects' with the following: # - language=Python # - Not deleted # - Not forked from any other repo # - has a url and name # - last updated in 2019 # # The data is saved in a local postgres database and then copied as csv under *data/ghtorrent_python_notdeleted_repos_updated2019*. # + import psycopg2 import pandas as pd import numpy as np # - # DOWNLOAD mysql-2019-06-01.tar from GHTORRENT: # http://ghtorrent-downloads.ewi.tudelft.nl/mysql/mysql-2019-06-01.tar.gz # then extract them and replace below with path for all csv files path = "data/mysql-2019-06-01.tar/mysql-2019-06-01/{}.csv" def insert_to_ghtorrent_db(table, rows): """ insert multiple rows into the vendors table """ insert_sql = '''INSERT INTO public.{}({}) VALUES ({});'''.format(table,','.join(table_columns[table]['columns']), ','.join(['%s']*len(table_columns[table]['columns']))) conn = None try: # Please define here a local database to use for insterting the data conn = psycopg2.connect(database="ghtorrent_restore", user="ghtorrentuser", password="<PASSWORD>") # create a new cursor cur = conn.cursor() cur.executemany(insert_sql,rows) conn.commit() cur.close() except (Exception, psycopg2.DatabaseError) as error: print("error while inserting") print(error) finally: if conn is not None: conn.close() # + table_columns = { 'projects':{ 'columns':['id', 'url', 'owner_id', 'name', 'description', 'language', 'created_at', 'forked_from', 'deleted', 'updated_at', 'forked_commit_id'], 'filter_on': ''}, #787034 'project_languages': {'columns':['project_id', 'language', 'bytes', 'created_at'], 'filter_on': 'project_id'}, 'repo_labels': {'columns':['id', 'repo_id', 'name'], 'filter_on': 'repo_id'}, 'watchers': {'columns':['repo_id', 'user_id', 'created_at'], 'filter_on': 'repo_id'}, } # we get all repositories: # - Python # - Not deleted # - Not forked from any other repo # - has a url and name # - last updated in 2019 def filter_data(table, data, filter_on=None): if table == 'projects': languages = [ 'Python' ] data = data[(data['language'].isin(languages)) & (data['deleted'] == '0') & (data['forked_from'].isna()) & (data['url'].notna()) & (data['name'].notna()) & (data['name'].notna()) & (data['updated_at'].str.startswith('2019')) ] elif filter_on is not None: project_ids = (pd.read_csv(path.format('projects_ids'), names=['ids'], na_values="\\N"))['ids'].values.tolist() data = data[data[filter_on].isin(project_ids) ] return data def insert_chunks_ghtorrent(table, chunksize = 50000): i = 0 for chunk in pd.read_csv(path.format(table), names=(table_columns[table])['columns'], chunksize=chunksize, na_values="\\N"): chunk = filter_data(table, chunk, filter_on = (table_columns[table])['filter_on']) chunk = chunk.replace({np.nan: None}) print('adding ', len(chunk), ' rows ') i += len(chunk) if len(chunk) > 0: insert_to_ghtorrent_db(table, chunk.values.tolist()) print('added a total of ', i, ' rows ') def 
insert_tables_ghtorrent():
    for t in table_columns.keys():
        if t != 'projects':
            print('processing table', t)
            insert_chunks_ghtorrent(t)
# -

insert_tables_ghtorrent()

# Scratch check of watcher counts -- this would need the watchers table loaded into a DataFrame
# first, since `watchers` is not defined anywhere in this notebook (the table is only streamed
# into Postgres in chunks):
# watchers['watchers'].value_counts()
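
# The notebook header says the filtered tables were finally copied out of the local Postgres
# database as CSV files under *data/ghtorrent_python_notdeleted_repos_updated2019*. That export
# step is not shown here; the cell below is only a sketch of how it could be done with the same
# psycopg2/pandas stack (the connection parameters reuse the placeholders above, and the output
# directory is an assumption).

# +
import os

def export_tables_to_csv(out_dir="data/ghtorrent_python_notdeleted_repos_updated2019"):
    # Sketch only -- dumps each filtered table from the local Postgres database to a CSV file.
    os.makedirs(out_dir, exist_ok=True)
    conn = psycopg2.connect(database="ghtorrent_restore",
                            user="ghtorrentuser",
                            password="<PASSWORD>")
    try:
        for table in ['projects', 'project_languages', 'repo_labels', 'watchers']:
            table_df = pd.read_sql_query("SELECT * FROM public.{}".format(table), conn)
            table_df.to_csv(os.path.join(out_dir, "{}.csv".format(table)), index=False)
            print('exported', table, 'with', len(table_df), 'rows')
    finally:
        conn.close()

# export_tables_to_csv()
# -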
code/0_data-preparation/1_fetching-ghtorrent.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Post-pandemic travel lodging # <hr/> # # Investigation of insights that be found within the google survey: Post-Pandemic Travel Lodging sns.palplot(sns.cubehelix_palette()) sns.palplot(sns.cubehelix_palette(start=2.8, rot=.1)) # + # Libaries import rpy2 import pandas as pd # %matplotlib inline import matplotlib.pyplot as plt import seaborn as sns; sns.set_theme() sns.set_style('ticks') import plotly.graph_objects as go import colorlover as cl # NLP algorithms from sklearn.feature_extraction.text import CountVectorizer from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer # + # # %load_ext rpy2.ipython # - # # data DICT # |---|---| # |:---:|: # # Content # # - [Data Ingestion](#Data-Ingestion) # - [Functions](#Functions) # - [EDA](#EDA) # - [Age](#Age) # - [Zipcode](#zipcode) # - [How often do you usually travel per year?](#How-often-do-you-usually-travel-per-year?) # - [What type of lodging do you most often stay at?](#What-type-of-lodging-do-you-most-often-stay-at?) # - [Who do you usually travel with?](#Who-do-you-usually-travel-with?) # - [How many people do you travel with?](#How-many-people-do-you-travel-with?) # - [Do you prefer to use an app or website when booking your travel accommodations?](#Do-you-prefer-to-use-an-app-or-website-when-booking-your-travel-accommodations?) # - [Have you travelled during the pandemic?](#Have-you-travelled-during-the-pandemic?) # - [If YES, what type of lodging did you stay at and why?](#If-YES,--what-type-of-lodging-did-you-stay-at-and-why?) # - [If YES, did you have any concerns or difficulties during your stay in relation to COVID?](#If-YES,--did-you-have-any-concerns-or-difficulties-during-your-stay-in-relation-to-COVID?) # - [Why do you typically choose that lodging option?](#Why-do-you-typically-choose-that-lodging-option?) # - ["Bed and Breakfast", what comes to mind](#"Bed-and-Breakfast",-what-comes-to-mind) # - [How do you typically find places to stay at while traveling? (rank from 1-4)](#How-do-you-typically-find-places-to-stay-at-while-traveling?-(rank-from-1-4)) # - [# When you book a vacation, what matters to you? 
(rank from 1-6)](#When-you-book-a-vacation,-what-matters-to-you?-(rank-from-1-6)) # Subsets of survey # # # Data Ingestion # <hr> # + # read Survey df1 = pd.read_excel('../data/Post-pandemic_travel_lodging-1.xlsx', head = None ) print(df1.shape) df1.head(1) # + # Rename columns df1.columns = ['timestamp', 'age', 'zipcode', 'term_bnb', 'trav_per_year', 'type_lodging', 'why_lodging', 'who_travel', 'num_people', 'find_recommendations', 'find_social', 'find_google', 'find_online', 'app_website', 'matters_tours', 'matters_food', 'matters_relax', 'matters_shopping', 'matters_meeting' , 'matters_immersing', 'covid_travel', 'covid_lodging', 'covid_issues'] # - # Reset index df1.drop('timestamp', axis = 1, inplace = True) #df1.reset_index(inplace = True) 'trav_per_year.yaxis.label.set_visible(False)' df1.T # --- # # Functions # <hr/> # + """ """ def prepare_table(data, column_range): dataframe = pd.DataFrame() number_of_columns = len(column_range) for each_column in column_range: new_tobe_row = round(data[each_column].value_counts(normalize = True) * 100 ,0) #print(new_tobe_row) dataframe = pd.concat([dataframe, new_tobe_row], axis =1 ) return dataframe.T # + """ from: https://morioh.com/p/374a3fbab626 """ def age_group(age): bucket = str """.apply().""" try: age = int(age) if age <= 19: bucket = ' below 20' elif age in range(20,25): bucket = '20 - 25' elif age in range(25,30): bucket = '25 - 29' elif age in range(30, 40): bucket = '30 - 39' elif age in range(40, 50): bucket = '40 - 49' elif age in range(50, 60): bucket = '50 - 59' elif age >= 60: bucket = '60+' else: bucket = type(age) return bucket except: return none # + """ creates a data frame with all the sentiment observations """ def sentiment_analyzer(data): # var data_frame = pd.DataFrame() # clean data # drop NaN data.dropna(axis = 0, inplace = True) # removes 'None' data = data [ data.str.contains('None') == False] # initiate analyzer = SentimentIntensityAnalyzer() for eachline in data: temp = analyzer.polarity_scores(eachline) data_frame= data_frame.append(temp, ignore_index = True) data_frame.rename(columns = {'neg':'negative','neu':'neutral','pos':'positive','compound':'compound'}, inplace = True) return data_frame # - # # EDA # <hr/> # ### Age # + [markdown] jupyter={"outputs_hidden": true} # # plot of ages # # df1['age_group'].hist(); # - # df1['age_group'] = df1['age'].apply(age_group) # # df1['age_group'].value_counts(normalize = True) * 100 # sns.barplot(y = df1.loc['age_group'].index, # x = df1.loc['age_group'].values, # order = sort_order, # palette = 'rocket', edgecolor = 'black', # ax = ax # ).set_title("Age {}".format('20 - 25')) # + [markdown] jupyter={"outputs_hidden": true} # # Zipcode # + # number of zipcodes df1['zipcode'].nunique() # - # + # list of zip codes df1['zipcode'].unique() # - # # # How often do you usually travel per year? # <hr/> df1['trav_per_year'].value_counts(normalize = True) sns.histplot(data = df1, x = 'trav_per_year', bins = 3,) df1['trav_per_year'].value_counts().plot(kind = 'bar', figsize=(7,5), color = 'mediumpurple'); # # What type of lodging do you most often stay at? # <hr/> # + # in % df1['type_lodging'].value_counts(normalize = True) * 100 # - df1['type_lodging'].value_counts().plot(kind = 'bar', figsize=(7,5), color = 'mediumpurple'); # # Who do you usually travel with? # <hr> # + df1['who_travel'].value_counts() # - df1['who_travel'].value_counts().plot(kind = 'bar', figsize=(7,5), color = 'mediumpurple'); # # How many people do you travel with? 
# <hr/> df1['num_people'].value_counts() df1['num_people'].value_counts().plot(kind = 'bar', figsize=(7,5), color = 'mediumpurple'); # # Do you prefer to use an app or website when booking your travel accommodations? # <hr/> # + # in % df1['app_website'].value_counts(normalize = True)*100 # - df1['app_website'].value_counts().plot(kind = 'bar', figsize=(7,5), color = 'mediumpurple'); # # Have you travelled during the pandemic? # <hr> # + # in % df1['covid_travel'].value_counts(normalize = True)* 100 # - df1['covid_travel'].value_counts().plot(kind = 'bar', figsize=(7,5), color = 'mediumpurple'); # # If YES, what type of lodging did you stay at and why? # <hr/> df1['covid_lodging'] # + sentiment_analyzer(df1['covid_lodging']).mean().plot(kind = 'barh'); # - # # If YES, did you have any concerns or difficulties during your stay in relation to COVID? # <hr/> df1['covid_issues'] sentiment_analyzer(df1['covid_issues']).mean().plot(kind = 'barh'); # # Why do you typically choose that lodging option? # <hr/> df1['why_lodging'] sentiment_analyzer(df1['why_lodging']).mean().plot(kind = 'barh'); # # "Bed and Breakfast", what comes to mind # <hr/> df1['term_bnb'].unique() sentiment_analyzer(df1['term_bnb']).mean().plot(kind = 'barh'); # lemitize # + # word count # + # Emotinal inflextion # + #### # - # # How do you typically find places to stay at while traveling? (rank from 1-4) # <hr/> findplaces_df = prepare_table(df1, ['find_recommendations','find_social','find_google','find_online']) findplaces_df findplaces_df[1] findplaces_df[1] = findplaces_df[1] * -1 category_order = [ 1, 2, 3, 4 ] # + fig = go.Figure(go.Bar( x=[20, 14, 23], y=['giraffes', 'orangutans', 'monkeys'], orientation='h')) fig.show() # + # sort by desired column findplaces_df = findplaces_df.sort_values(by=1, ascending = False) # initalized figure fig = go.Figure() for column in findplaces_df.columns: fig.add_trace(go.Bar( x = findplaces_df[column]b, y = findplaces_df.index, name = column, orientation = 'h', marker_color = cl.scales[str(len(category_order))]['div']['RdYlGn'][category_order.index(column)], )) fig.update_layout( barmode = 'relative', title = 'How do you typically find places to stay at while traveling' ) fig.show() # + magic_args="-i findplaces_df -w 10 -h 5 -u in -r 400" language="R" # # # replacing '.' in colnames by spaces which were lost in the # # transition from Python to R # # install.packages("HH", repos='https://CRAN.R-project.org/package=HH', quiet=TRUE) # library(HH) # # # colnames(findplaces_df) <- gsub('\\.', ' ', colnames(findplaces_df)) # likert(index ~ ., data=findplaces_df, # main="Machiavellianism", # ylab=NULL, # scales=list(y=list(relation="free")), layout=c(1,1), # positive.order=T) # - """ likert(category ~ . | question, subset(quest, subtable=='age'), main='Machiavellianism', ylab='Age categories', as.percent=T, layout=c(1,5)) likert(question ~ . | category, subset(quest, subtable=='gender'), main='Machiavellianism', ylab=NULL, as.percent=T, rightAxis=F, positive.order = T) """ # # When you book a vacation, what matters to you? 
(rank from 1-6)
# <hr/>

prepare_table(df1, ['matters_tours','matters_food','matters_relax',
                    'matters_shopping','matters_meeting', 'matters_immersing']).T

# +
# NOTE: this reuses the stacked-bar plotting code from the "find places" question;
# adapt the dataframe and title to chart the "what matters to you" rankings prepared above.

# sort by desired column
findplaces_df = findplaces_df.sort_values(by=1, ascending = False)

# initialized figure
fig = go.Figure()

for column in findplaces_df.columns:
    fig.add_trace(go.Bar(
        x = findplaces_df[column],
        y = findplaces_df.index,
        name = column,
        orientation = 'h',
        marker_color = cl.scales[str(len(category_order))]['div']['RdYlGn'][category_order.index(column)],
    ))

fig.update_layout(
    barmode = 'relative',
    title = 'How do you typically find places to stay at while traveling'
)

fig.show()
# -

# Leftover plotting fragments (not runnable on their own):
# .yaxis.label.set_visible(False)
# .set_title("Age {}".format(age_group))
# sort_order = groupby_count_obj.loc[age_group].sort_index().index
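# # Appendix: exercising the age_group helper
# <hr/>
#
# A minimal, hedged sketch of how the `age_group` helper defined in the Functions section could be
# applied, assuming `df1` has been loaded and renamed as in the Data Ingestion section. It buckets
# the free-text ages and shows the resulting distribution as percentages.

# +
df1['age_group'] = df1['age'].apply(age_group)

df1['age_group'].value_counts(normalize = True) * 100
# -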
code/past/prep_survy-Copy1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Generate RBS Recommendations # # This notebook generates RBS recommendations based on the [dataset](https://github.com/synbiochem/opt-mva) provided by [paper](https://pubs.acs.org/doi/abs/10.1021/acssynbio.8b00398). The goal is to find the best 6-base RBS sequence (-6 ~-12 base?) which gives the maximum translation initiation rate (TIR). # # The dataset has 56 arms with 113 data points, which each sequence is 13-base and the label is the TIR. We read the whole data as initialisation and recommend 100 arms to sample in the first round, which will be done in the biology experiment. So the 100 arms should be unique and sampled from the whole possible arm space. # direct to proper path import os import sys module_path = os.path.abspath(os.path.join('..')) if module_path not in sys.path: sys.path.append(module_path) # + import numpy as np import pandas as pd import matplotlib.pyplot as plt import itertools from collections import defaultdict import random from codes.embedding import Embedding from codes.environment import Rewards_env from codes.ucb import GPUCB, Random from codes.evaluations import evaluate, plot_eva from codes.regression_cp import Regression from codes.kernels import spectrum_kernel from codes.kernels_pairwise import spectrum_kernel_pw, mixed_spectrum_kernel_pw, WD_kernel_pw, WD_shift_kernel_pw from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.gaussian_process.kernels import PairwiseKernel, DotProduct, RBF from sklearn.kernel_ridge import KernelRidge from ipywidgets import IntProgress from IPython.display import display import warnings from prettytable import PrettyTable # %matplotlib inline # + # Data downloaded from https://github.com/synbiochem/opt-mva # Paper https://pubs.acs.org/doi/abs/10.1021/acssynbio.8b00398 Path = '../data/RBS_list.xlsx' df = pd.read_excel(Path, sheet_name='Unique RBS') df.columns = ['A', 'B', 'C', 'D', 'E', 'F'] df.head() # + group_one = df[:158].drop_duplicates(subset =['B', 'C']) group_two = df[158:].drop_duplicates(subset =['B', 'C']) print('group one length: ', len(group_one)) print('group two length: ', len(group_two)) # + Log_flag = False # indicates whether take log label Norm_method = 'minmax' # indicates how to normalize label (one of 'mean', 'minmax', None) def normalize(df): # take log FC -- possiblely provide Gaussain distribution? if Log_flag: df['C'] = np.log(df['C']) if Norm_method == 'mean': # mean normalization df['C'] = (df['C'] - df['C'].mean())/df['C'].std() elif Norm_method == 'minmax': # min-max normalization df['C'] = (df['C'] - df['C'].min())/(df['C'].max() - df['C'].min()) else: assert Norm_method == None return df # + data_one = np.asarray(normalize(group_one)[['B', 'C']]) data_two = np.asarray(normalize(group_two)[['B', 'C']]) data = np.concatenate((data_one, data_two), axis = 0) data.shape # + # indicates whether cross validation (KFOLD) cross_val_flag = False # indicates whether plot predict_label vs. 
true label plot_flag = True # string kernel list nonstr_kernels = [DotProduct(), RBF()] str_kernels = [spectrum_kernel_pw, mixed_spectrum_kernel_pw, WD_kernel_pw, #WD_shift_kernel_pw ] # name dictionaries regression_name = {KernelRidge: 'KR', GaussianProcessRegressor: 'GPR'} nonstr_embedding_name = ['onehot', 'kmer'] str_embedding_name = ['label'] str_kernel_name = { spectrum_kernel_pw: 'spec', mixed_spectrum_kernel_pw: 'mspec', WD_kernel_pw: 'WD', WD_shift_kernel_pw: 'WDshift' } test_scores = {} num_exper = 10 # - # ## Non String Kernels def run_regression_nsk(data, cp, data_key): for model in [KernelRidge, GaussianProcessRegressor]: for kernel in nonstr_kernels: for embedding in nonstr_embedding_name: key = regression_name[model] + '_' + str(kernel).split('(')[0]+ '_' + embedding if key not in test_scores: test_scores[key] = defaultdict(list) reg = Regression(model(kernel = kernel), data, embedding, cp, split_idx=56) reg.run_k() test_scores[key][data_key[0]] = reg.test_scores1 if cp and len(data_key) > 1: test_scores[key][data_key[1]] = reg.test_scores2 run_regression_nsk(data, True, ['12', '21']) run_regression_nsk(data_one, False, ['11']) run_regression_nsk(data_two, False, ['22']) # ## String Kernels def run_regression_sk(data, cp, data_key): for model in [KernelRidge, GaussianProcessRegressor]: for kernel in str_kernels: for embedding in str_embedding_name: key = regression_name[model] + '_' + str_kernel_name[kernel] + '_' + embedding print(key) if key not in test_scores: test_scores[key] = defaultdict(list) if model == KernelRidge: reg = Regression(model(kernel = kernel), data, embedding, cp, split_idx=56) elif model == GaussianProcessRegressor: reg = Regression(model(kernel = PairwiseKernel(metric = kernel)), data, embedding, cp, split_idx=56) reg.run_k() test_scores[key][data_key[0]] = reg.test_scores1 if cp and len(data_key) > 1: test_scores[key][data_key[1]] = reg.test_scores2 run_regression_sk(data, True, ['12', '21']) run_regression_sk(data_one, False, ['11']) run_regression_sk(data_two, False, ['22']) # ## Compare of Cross Prediction # + y = defaultdict(dict) yerr = defaultdict(dict) for model_key, data_scores in test_scores.items(): print(model_key) x = PrettyTable() x.field_names = ['', 'Test1', 'Test2'] p = [] for data_key in sorted(data_scores): mean = np.round(np.asarray(data_scores[data_key]).mean(),2) std = np.round(np.asarray(data_scores[data_key]).std(),2) y[data_key][model_key] = mean yerr[data_key][model_key] = std p.append(str(mean) + ' +/- ' + str(std)) x.add_row(['Train1', p[0], p[1]]) x.add_row(['Train2', p[2], p[3]]) print(x) print() # - # ## Comare of RMSE scores # # 11: Train and test using group one. KFold (k= 10) # 22: Train and test using group two. KFold (k= 10) # 12: Train group one and test group two. (Using random subset of train set) # 21: Train group two and test group one. 
(Using random subset of train set)

# +
import collections

for data_key, scores in y.items():
    plt.figure()
    # sort the models by their mean RMSE
    sorted_test_scores = sorted(scores.items(), key=lambda kv: kv[1])
    sorted_test_scores = collections.OrderedDict(sorted_test_scores)
    # keep the error bars aligned with the sorted scores rather than sorting the errors independently
    sorted_test_errs = [yerr[data_key][model_key] for model_key in sorted_test_scores]

    plt.errorbar(range(len(sorted_test_scores)),
                 list(sorted_test_scores.values()),
                 yerr = sorted_test_errs)
    #plt.bar(range(len(sorted_test_scores)), list(sorted_test_scores.values()), align='center')
    plt.xticks(range(len(sorted_test_scores)), list(sorted_test_scores.keys()), rotation = 70)
    plt.ylabel('RMSE')
    plt.title(data_key + ' Test Scores')
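# ## Appendix: spectrum kernel intuition
#
# The string kernels imported above (e.g. the spectrum kernel) compare two sequences through their
# shared k-mer content. The snippet below is only an illustrative toy version written from the
# textbook definition; it is *not* the implementation in `codes.kernels` / `codes.kernels_pairwise`,
# and the two sequences are made up purely for demonstration.

# +
from collections import Counter

def toy_spectrum_kernel(seq_a, seq_b, k=3):
    """k(x, y) = <phi_k(x), phi_k(y)>, where phi_k counts every k-mer in a sequence."""
    kmers_a = Counter(seq_a[i:i + k] for i in range(len(seq_a) - k + 1))
    kmers_b = Counter(seq_b[i:i + k] for i in range(len(seq_b) - k + 1))
    return sum(count * kmers_b[kmer] for kmer, count in kmers_a.items())

print(toy_spectrum_kernel('AGGAGGTAAGGAG', 'AGGAGGTTAGGAG', k=3))
# -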
notebook/others/regression/generate_rbs_rec_with_cross_prediction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # # Murnau SPARQL examples. # # + # querying function. import requests, pydash, pandas import altair, numpy def run_query(search, url): def value_extract(row, col): return pydash.get(row[col], 'value') r = requests.get(url, params = {'format': 'json', 'query': search}) data = pydash.get(r.json(), 'results.bindings') data = pandas.DataFrame.from_dict(data) for x in data.columns: data[x] = data.apply(value_extract, col=x, axis=1) return data # - # ##### SPARQL #1: Works, with contributors work ids. # + dataframe = run_query(""" SELECT DISTINCT ?work ?workLabel ?aca_id ?bfi_id ?eye_id ?gfa_id ?loc_id ?mom_id ?nfa_id ?sfi_id WHERE { ?work wdt:P1 wd:Q1019. OPTIONAL { ?work p:P29 [ps:P29 ?aca_id; pq:P33 wd:Q160 ]}. OPTIONAL { ?work p:P29 [ps:P29 ?bfi_id; pq:P33 wd:Q897 ]}. OPTIONAL { ?work p:P29 [ps:P29 ?eye_id; pq:P33 wd:Q530 ]}. OPTIONAL { ?work p:P29 [ps:P29 ?gfa_id; pq:P33 wd:Q820 ]}. OPTIONAL { ?work p:P29 [ps:P29 ?loc_id; pq:P33 wd:Q402 ]}. OPTIONAL { ?work p:P29 [ps:P29 ?mom_id; pq:P33 wd:Q978 ]}. OPTIONAL { ?work p:P29 [ps:P29 ?nfa_id; pq:P33 wd:Q959 ]}. OPTIONAL { ?work p:P29 [ps:P29 ?sfi_id; pq:P33 wd:Q15 ]}. SERVICE wikibase:label { bd:serviceParam wikibase:language "en". } } """, 'http://172.16.17.32:8989/bigdata/sparql') dataframe = dataframe.pivot_table(index=['work', 'workLabel'], aggfunc=lambda x: ', '.join(sorted(x.unique().astype(str)))).reset_index() print(len(dataframe)) dataframe.head(25) # - # ##### SPARQL #2: Lost works, as far as can be ascertained from contributed data. # + dataframe = run_query(""" SELECT DISTINCT ?work ?workLabel ?item WHERE { ?work wdt:P1 wd:Q1019. OPTIONAL {?work wdt:P12 ?manifestation. OPTIONAL {?manifestation wdt:P14 ?item}}. SERVICE wikibase:label { bd:serviceParam wikibase:language "en". } } """, 'http://172.16.17.32:8989/bigdata/sparql') dataframe = dataframe.pivot_table(index=['work', 'workLabel'], aggfunc=lambda x: ', '.join(sorted(x.unique().astype(str)))).reset_index() dataframe = dataframe.loc[dataframe.item.isin(['None'])] print(len(dataframe)) dataframe.head(20) # - # ##### SPARQL #3: Murnau works forbidden in Sweden. # + dataframe = run_query(""" SELECT DISTINCT ?work ?workLabel WHERE { ?work p:P8 ?event. ?event pq:P35 "Totalförbjuden". ?event pq:P36 wd:Q955. SERVICE wikibase:label { bd:serviceParam wikibase:language "en". } } """, 'http://167.99.135.149:8989/bigdata/sparql') print(len(dataframe)) dataframe.head() # - # ##### SPARQL #4: Murnau agents' birthdates contributed by the EYE, against BFI authority ids. # + dataframe = run_query(""" SELECT ?agentLabel ?birth ?bfi_id WHERE { ?agent wdt:P1 wd:Q1017; p:P8 [ps:P8 ?birth; pq:P31 wd:Q945 ]; p:P29 [ps:P29 ?bfi_id; pq:P33 wd:Q897 ] SERVICE wikibase:label { bd:serviceParam wikibase:language "en". } } """, 'http://172.16.17.32:8989/bigdata/sparql') print(len(dataframe)) dataframe.head(40) # - # ##### SPARQL #5: Breakdown of carrier types per contributor. # + dataframe = run_query(""" SELECT DISTINCT ?work ?workLabel ?item ?carrierLabel ?heldLabel WHERE { ?work wdt:P1 wd:Q1019. ?work wdt:P12 ?manifestation. ?manifestation wdt:P14 ?item. ?item wdt:P17 ?carrier. ?item wdt:P11 ?held. SERVICE wikibase:label { bd:serviceParam wikibase:language "en". 
} } """, 'http://172.16.17.32:8989/bigdata/sparql') dataframe = dataframe[['carrierLabel', 'heldLabel', 'work']] dataframe = dataframe.pivot_table(index=['carrierLabel', 'heldLabel'], aggfunc=lambda x: len(x)).reset_index() dataframe = dataframe.rename(columns={'work':'item count', 'heldLabel':'contributor', 'carrierLabel':'carrier type'}) line = altair.Chart(dataframe).mark_line(interpolate='linear').encode(x='contributor', y='item count:Q', color='carrier type') display(altair.layer(line).properties(width=600, height=300)) # - # ##### SPARQL #6: Breakdown of carrier types per work. # + dataframe = run_query(""" SELECT DISTINCT ?work ?workLabel ?item ?carrierLabel WHERE { ?work wdt:P1 wd:Q1019. ?work wdt:P12 ?manifestation. ?manifestation wdt:P14 ?item. ?item wdt:P17 ?carrier. SERVICE wikibase:label { bd:serviceParam wikibase:language "en". } } """, 'http://172.16.17.32:8989/bigdata/sparql') dataframe = dataframe[['workLabel', 'carrierLabel', 'work']] dataframe = dataframe.pivot_table(index=['workLabel', 'carrierLabel'], aggfunc=lambda x: len(x)).reset_index() dataframe = dataframe.rename(columns={'work':'item count', 'workLabel':'work', 'carrierLabel':'carrier type'}) line = altair.Chart(dataframe).mark_line(interpolate='linear').encode(x='work', y='item count:Q', color='carrier type') display(altair.layer(line).properties(width=600, height=300)) # - # ##### SPARQL #7: Detail of film base per work. # + dataframe = run_query(""" SELECT DISTINCT ?work ?workLabel ?item ?carrierLabel ?baseLabel WHERE { ?work wdt:P1 wd:Q1019. ?work wdt:P12 ?manifestation. ?manifestation wdt:P14 ?item. ?item wdt:P17 ?carrier. ?item wdt:P20 ?base. FILTER ( ?carrier in (wd:Q1006,wd:Q1006) ) SERVICE wikibase:label { bd:serviceParam wikibase:language "en". } } """, 'http://172.16.17.32:8989/bigdata/sparql') dataframe = dataframe[['workLabel', 'baseLabel', 'work']] dataframe = dataframe.pivot_table(index=['workLabel', 'baseLabel'], aggfunc=lambda x: len(x)).reset_index() dataframe = dataframe.rename(columns={'work':'item count', 'workLabel':'work', 'baseLabel':'film base'}) line = altair.Chart(dataframe).mark_line(interpolate='linear').encode(x='work', y='item count:Q', color='film base') display(altair.layer(line).properties(width=600, height=300)) # - # ##### SPARQL #8: Nitrate film length per work. # + dataframe = run_query(""" SELECT DISTINCT ?work ?workLabel ?item ?carrierLabel ?baseLabel ?itemLabel ?metres ?feet WHERE { ?work wdt:P1 wd:Q1019. ?work wdt:P12 ?manifestation. ?manifestation wdt:P14 ?item. ?item wdt:P17 ?carrier. ?item wdt:P20 ?base. OPTIONAL{?item wdt:P26 ?metres.} OPTIONAL{?item wdt:P27 ?feet.} FILTER ( ?carrier in ( wd:Q1006,wd:Q1006 ) ) FILTER ( ?base in ( wd:Q826,wd:Q826 ) ) # this is SERVICE wikibase:label { bd:serviceParam wikibase:language "en". } } """, 'http://172.16.17.32:8989/bigdata/sparql') def feet_to_metres(row): if not row['metres']: if row['feet']: return float(row['feet'])/3.2808 else: return(row['metres']) dataframe['metres'] = dataframe.apply(feet_to_metres, axis=1).dropna() dataframe = dataframe.loc[~dataframe.metres.isin([numpy.nan])] for x in dataframe.workLabel.unique(): sample = dataframe.loc[dataframe.workLabel.isin([x])] sample = sample.rename(columns={'itemLabel':'items'}) if len(sample) > 4: print(x) line = altair.Chart(sample).mark_bar(interpolate='linear').encode(x='metres:Q', y='items') display(altair.layer(line).properties(width=700, height=300).configure_axisY(labelAlign='right')) # - # ##### SPARQL #9: Linking Wikibase to Wikidata. 
# + dataframe1 = run_query(""" SELECT ?work ?workLabel ?wikidata ?sfi WHERE { ?work wdt:P1 wd:Q1019; p:P29 [ps:P29 ?wikidata; pq:P33 wd:Q1027 ]; p:P29 [ps:P29 ?sfi; pq:P33 wd:Q15 ] SERVICE wikibase:label { bd:serviceParam wikibase:language "en". } } """, 'http://172.16.17.32:8989/bigdata/sparql') dataframe2 = run_query(""" SELECT ?wikidata ?director ?imdb WHERE { ?wikidata wdt:P31 wd:Q11424. ?wikidata wdt:P57 ?director. ?wikidata wdt:P345 ?imdb. FILTER ( ?director in ( wd:Q55412,wd:Q55412 ) ) } """, 'https://query.wikidata.org/sparql') dataframe2['wikidata'] = dataframe2['wikidata'].str.split('/').str[-1] dataframe3 = pandas.merge(dataframe1, dataframe2, on='wikidata', how='inner') print(len(dataframe3)) dataframe3.head()
5-sparql/.ipynb_checkpoints/sparql-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Using polynomial features in SVM

import numpy as np
import matplotlib.pyplot as plt

# +
from sklearn import datasets

X, y = datasets.make_moons()
# -

X.shape

y.shape

plt.scatter(X[y==0,0], X[y==0,1])
plt.scatter(X[y==1,0], X[y==1,1])
plt.show()

# +
X, y = datasets.make_moons(noise=0.15, random_state=666)

plt.scatter(X[y==0,0], X[y==0,1])
plt.scatter(X[y==1,0], X[y==1,1])
plt.show()
# -

# ### SVM with polynomial features

# +
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline

def PolynomialSVC(degree, C=1.0):
    return Pipeline([
        ("poly", PolynomialFeatures(degree=degree)),
        ("std_scaler", StandardScaler()),
        ("linearSVC", LinearSVC(C=C))
    ])
# -

poly_svc = PolynomialSVC(degree=3, C=2)
poly_svc.fit(X, y)

def plot_decision_boundary(model, axis):
    x0, x1 = np.meshgrid(
        np.linspace(axis[0], axis[1], int((axis[1]-axis[0])*100)).reshape(-1, 1),
        np.linspace(axis[2], axis[3], int((axis[3]-axis[2])*100)).reshape(-1, 1),
    )
    X_new = np.c_[x0.ravel(), x1.ravel()]

    y_predict = model.predict(X_new)
    zz = y_predict.reshape(x0.shape)

    from matplotlib.colors import ListedColormap
    custom_cmap = ListedColormap(['#EF9A9A','#FFF59D','#90CAF9'])

    plt.contourf(x0, x1, zz, linewidth=5, cmap=custom_cmap)

plot_decision_boundary(poly_svc, axis=[-1.5, 2.5, -1.0, 1.5])
plt.scatter(X[y==0,0], X[y==0,1])
plt.scatter(X[y==1,0], X[y==1,1])
plt.show()

# ### SVM with a polynomial kernel function

# +
from sklearn.svm import SVC

def PolynomialKernelSVC(degree, C=1.0):
    return Pipeline([
        ("std_scaler", StandardScaler()),
        ("kernelSVC", SVC(kernel="poly", degree=degree, C=C))
    ])
# -

poly_kernel_svc = PolynomialKernelSVC(degree=3)
poly_kernel_svc.fit(X, y)

plot_decision_boundary(poly_kernel_svc, axis=[-1.5, 2.5, -1.0, 1.5])
plt.scatter(X[y==0,0], X[y==0,1])
plt.scatter(X[y==1,0], X[y==1,1])
plt.show()
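# ### How degree and C change the boundary

# As a small follow-up sketch, reusing the `PolynomialKernelSVC` and `plot_decision_boundary`
# helpers defined above: fitting a higher-degree, lower-C model shows how the decision boundary
# becomes more flexible. The exact shape depends on the noise and random_state chosen earlier.

poly_kernel_svc5 = PolynomialKernelSVC(degree=5, C=0.5)
poly_kernel_svc5.fit(X, y)

plot_decision_boundary(poly_kernel_svc5, axis=[-1.5, 2.5, -1.0, 1.5])
plt.scatter(X[y==0,0], X[y==0,1])
plt.scatter(X[y==1,0], X[y==1,1])
plt.show()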
svm/svm-poly.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Universal concepts, preamble & glossary # # ## Types of Machine Learning # There are three fundamental types of problems that machine learning algorithms are trying to solve. # # ### Supervised # This is where we have some examples of a given input and its output and we are trying to model that function based on the data. # # #### &nbsp;&nbsp;&nbsp;&nbsp; Regression # &nbsp;&nbsp;&nbsp;&nbsp;The output of our model is a number or vector of numbers which can take any real value. An example is trying to predict the price of a house based on a number of features such as number of rooms, number of windows, etc. # # # #### &nbsp;&nbsp;&nbsp;&nbsp;Classification # &nbsp;&nbsp;&nbsp;&nbsp;The output of our model, based on the inputs, is the probability of the input belonging to a class. An example is having an input image and outputting if it is a hotdog or not. # # ### Unsupervised # There is no input-output relationship which we are trying to model. Rather, we want to find some hidden structure in the data. One type is clustering in which you are trying to group your data. A use case could be segmenting your customers by their interests so you can target them with different material. # # ### Reinforcement Learning # In reinforcement learning, we have an agent, which takes in observations from an environment and takes actions based on those observations to maximize a reward function. This is inspired by pavlovian conditioning which has been shown to be the method that mammals learn by. # ## Datapoint # # Here a single data point, $x^{(1)}$ is represented as a row vector where each column is a different **feature**. # # For example, if each training example is a house, then its vector of features may include elements for its price, no. rooms, no. windows etc. # # ### $X^{(1)} = \begin{bmatrix} x^{(1)}_1 & x^{(1)}_2 & \dots & x^{(1)}_{n-1}& x^{(1)}_n \end{bmatrix}$ # # ## Design Matrix # The **design matrix**, **X** contains all of our training data. Each row represents a certain example. There are $m$ training examples. Each row represents a different feature. There are $n$ features. Hence the design matrix has dimensions of $n$ by $m$. # # ### $Design \ matrix,\ X = \begin{bmatrix} \dots & x^{(1)} &\dots \\ & \vdots & \\ \dots & x^{(m)} & \dots \end{bmatrix} = \begin{bmatrix} x_{11} \dots x_{1n} \\ \vdots \ddots \vdots \\ x_{m1} \dots x_{mn} \end{bmatrix} \in m \times n$ # ## Hypothesis # The hypothesis, $h$ is the output of your model. It is your current prediction of the mapping from input to output. # ## Loss/cost function # # # For our algorithms to learn, we need a way to evaluate their current performance, so that we can determine how to improve. We can mathematically define when our algorithm is performing well by evaluating an appropriate objective function. We usually try to minimise a function which indicates the error in our hypothesis (how bad our model is). We will represent the loss of our models with the symbol $J$. The cost function is dependent on as many dimensions as we have parameters (which are relevant to that loss function). Changing these parameters moves us around parameter space, in which the cost varies. Varying different parameters will have varying influence on how the cost changes - as such, some are more important to optimise. 
# # #### Mean Squared Error Loss # MSE loss is the average over all training points of the squared error between your hypothesis and the label. The factor of $\frac{1}{2}$ is often included to cancel with the power of 2 when differentiated so that no constants are present. # # ### $ J =\frac{1}{2m} \sum_{i=1}^{m}(h^{(i)} - y^{(i)})^2$ # # #### Binary Cross Entropy loss # BCE loss is used to calculate error for classification tasks. # # ### $ J = \sum_{i=1}^{m} - y^{(i)} \cdot \text{log}(h^{(i)}) + (1-y^{(i)}) \cdot \text{log}(1-h^{(i)})$ # # In classification tasks, for each class the label of a datapoint can only take binary values of 0 or 1; i.e. it *is* a member of that class or it *is not* a member of that class, and the output is usually a *confidence* value $\in [0, 1]$. # When , $y, = 0$ the first term is 'turned off' and the second term # # #### Kullback-Leibler Divergence # The KL divergence is a metric that quantifies the difference between two probability distributions, $p$ & $q$. It is used frequently in machine learning to measure the information lost when we try to represent a probability distribution in a different way (e.g. after reconstructing it from an encoding). # # ### $D_{KL}(p||q) = \sum_{i=1}^{m} p(x_i)\cdot (\text{log }p(x_i) - \text{log }q(x_i)) = \sum_{i=1}^{m} p(x_i)\cdot \text{log } \frac{p(x_i)}{q(x_i)}$ # # ##### Practically, for a normal distribution, the KL-divergence can be evaluated as : $D_{KL} = \sum_i (\sigma^2 + $ # # For a single datapoint, $x$, the KL divergence tests how similar the log probabilities of that value are and weights that difference by the value of the probability of sampling that $x$ from $p(x)$. The weighting $p(x)$ of the log difference makes the KL divergence different depending on which arrangement you compare the probability distributions in. # # Consider: # - It takes large values when the sampled probabilities for the same values are more different, and the weighting probability distribution $p(x)$ is larger. # - It takes a value of zero where the weighting probability distribution is zero. # - Where the # - The aim is often to minimise the KL divergence (the information difference between two probability distributions). # ## Gradient Descent and the learning rate # Gradient descent is the most popular optimization strategy currently used in machine learning. It has proven to be very effective even when there are millions of parameters to optimize as in the case of deep learning.<br> # Lets say we have a function # ### $J = f(x, y;\theta)$ # which we are trying to minimize by finding optimal values of $\theta$.<br> # # Gradient descent works by moving the weights that control a model in a direction that most decreases the cost. What is this direction? The gradient of a function at a point is a vector pointing in the direction which it increases fastest locally. So the direction which most *decreases* the cost function, is the negative gradient - in this case, it will be an $n+1$ (features and bias) dimensional vector containing the partial derivatives of the cost with respect to each of these model parameters. # # The negative gradient tells us the correct direction to move each weight in, but not the ideal size of the step. # # If we move the parameters by the value of the negative gradient, there is a chance that they may jump straight over the minima, perhaps to a point where the gradient is even higher! This can happen because the gradient can be greater than the distance of the parameter from its optimal position. 
This causes divergence, of the model parameters, instead of convergence. # So, in gradient descent, we iteratively update the weights *proportionally* to the negative gradient at their local position. This proportionality constant, which the gradient is multiplied by to get the step size, is called the **learning rate**. The learning rate should be large enough so that the algorithm converges at a suitable rate, but small enough enough to ensure that it does not diverge. # # At a minima, the parameter should stabilise because the step size is proportional to the gradient, which will be zero. # # We can utilize the gradient descent strategy only if $J$ is a differentiable function.<br> # We first start by initializing $\theta$ randomly. We then calculate $J$ and the derivative of $J$ w.r.t $\theta$. # Once we have $\frac{\partial J}{\partial\theta}$, we update $\theta$ using the following update rule: # ### $\theta := \theta - \alpha\frac{\partial J}{\partial\theta}$ # # While it is important to understand the equations, it is equally important to have an intuitive understanding of what is going on. What we are doing when we calulate the partial derivative of $J$ w.r.t $\theta_i$ is we are finding out how a small increase in $\theta_i$ affects $J$. If this leads to an increase in $J$ we decrease our $\theta_i$ as we are trying to reduce $J$. If it leads to a decrease in $J$, we increase our $\theta_i$. This explains the negative sign in the update rule. <br> # # This can easily be visualised in the case where we have two parameters. We have a surface and we are trying to find the lowest point on this surface which corresponds to the lowest value of $J$. We start at a random point on the surface and interatively calculate the direction of greatest ascent ($\frac{\partial J}{\partial\theta}$) before taking a step in the opposite direction. We scale our steps by a factor of $\alpha$. We can set different values for $\alpha$ at runtime and we will get different results. If our $\alpha$ is too high, we will overshoot the minima and if it is too low, we will take too long to get to the minima. # ![](gradientdescent.png) # # # # ### Pseudo code for SGD # #### Randomly select a batch of datapoints from the training set to train on. # #### Make a prediction of the output for those # #### Evaluate the model's cost for these predictions # #### Find rate of change of cost wrt model parameters # #### Update the model parameters proportionately to the negative gradients found above, according to: # # ### $ \theta\ \dot{=}\ \theta - \alpha \frac{\partial J}{\partial \theta}$ # # #### Repeat for defined number of epochs # ## Embedding # # #### One-hot # # If we have a classification problem, instead of having a word as a label, we can have a $K$-dimensional vector, where $K$ is the number of classes, and each element of that vector is zero except for one element that represents the true class label. This is a one-hot encoding. This is a way of representing a label numberically, using the same number of elements as there are classes. # # #### Embedding # # For one-hot encoding, each different possible label is a mutually orthogonal unit vector. All possible class labels make up a $K$-dimensional basis of vectors. This means that to be able to reach the whole range of our output space, we need a $K$- dimensional output from our model. # Alternatively, class vectors can be embedded into a lower dimensional subspace, where less than $K$-dimensional vectors can be the output of our model. 
This embedding is not binary (discrete); its components are continuous and are not restricted to the range $[0, 1]$.
#
# Imagine you are training a model to predict the next word in a sentence. You don't want your output space to have as many dimensions as there are predictable words in your corpus. So instead, you can embed these words into a low-dimensional subspace where each of them is a vector. Similar words will be closer to each other, and vector algebra can be done on these vectors.
#
#
# ![title](embedding.png)
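# ## A worked numerical example
#
# To tie the glossary together, here is a minimal NumPy sketch, with toy numbers chosen purely for
# illustration, of the MSE and binary cross entropy losses, the discrete KL divergence, and a single
# gradient descent update of the form $\theta := \theta - \alpha\frac{\partial J}{\partial\theta}$.
# It assumes nothing beyond the formulas already stated above.

# +
import numpy as np

h = np.array([0.9, 0.2, 0.7])   # hypotheses (model outputs)
y = np.array([1.0, 0.0, 1.0])   # labels

mse = 0.5 * np.mean((h - y) ** 2)                        # mean squared error with the 1/2 factor
bce = -np.sum(y * np.log(h) + (1 - y) * np.log(1 - h))   # binary cross entropy

# discrete KL divergence between two toy probability distributions p and q
p = np.array([0.1, 0.6, 0.3])
q = np.array([0.2, 0.5, 0.3])
kl = np.sum(p * np.log(p / q))

# one gradient descent step for a one-parameter model h = theta * x with squared error loss
x, target, theta, alpha = 2.0, 3.0, 0.0, 0.1
grad = (theta * x - target) * x     # dJ/dtheta for J = 0.5 * (theta * x - target)**2
theta = theta - alpha * grad        # theta := theta - alpha * dJ/dtheta

print(mse, bce, kl, theta)
# -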
Universal concepts, preamble & glossary.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Linear algebra in Python with NumPy # # In this lab, you will have the opportunity to remember some basic concepts about linear algebra and how to use them in Python. # # Numpy is one of the most used libraries in Python for arrays manipulation. It adds to Python a set of functions that allows us to operate on large multidimensional arrays with just a few lines. So forget about writing nested loops for adding matrices! With NumPy, this is as simple as adding numbers. # # Let us import the `numpy` library and assign the alias `np` for it. We will follow this convention in almost every notebook in this course, and you'll see this in many resources outside this course as well. import numpy as np # The swiss knife of the data scientist. # ## Defining lists and numpy arrays alist = [1, 2, 3, 4, 5] # Define a python list. It looks like an np array narray = np.array([1, 2, 3, 4]) # Define a numpy array # Note the difference between a Python list and a NumPy array. # + print(alist) print(narray) print(type(alist)) print(type(narray)) # - # ## Algebraic operators on NumPy arrays vs. Python lists # # One of the common beginner mistakes is to mix up the concepts of NumPy arrays and Python lists. Just observe the next example, where we add two objects of the two mentioned types. Note that the '+' operator on NumPy arrays perform an element-wise addition, while the same operation on Python lists results in a list concatenation. Be careful while coding. Knowing this can save many headaches. print(narray + narray) print(alist + alist) # It is the same as with the product operator, `*`. In the first case, we scale the vector, while in the second case, we concatenate three times the same list. print(narray * 3) print(alist * 3) # Be aware of the difference because, within the same function, both types of arrays can appear. # Numpy arrays are designed for numerical and matrix operations, while lists are for more general purposes. # ## Matrix or Array of Arrays # # In linear algebra, a matrix is a structure composed of n rows by m columns. That means each row must have the same number of columns. With NumPy, we have two ways to create a matrix: # * Creating an array of arrays using `np.array` (recommended). # * Creating a matrix using `np.matrix` (still available but might be removed soon). # # NumPy arrays or lists can be used to initialize a matrix, but the resulting matrix will be composed of NumPy arrays only. # + npmatrix1 = np.array([narray, narray, narray]) # Matrix initialized with NumPy arrays npmatrix2 = np.array([alist, alist, alist]) # Matrix initialized with lists npmatrix3 = np.array([narray, [1, 1, 1, 1], narray]) # Matrix initialized with both types print(npmatrix1) print(npmatrix2) print(npmatrix3) # - # However, when defining a matrix, be sure that all the rows contain the same number of elements. Otherwise, the linear algebra operations could lead to unexpected results. # # Analyze the following two examples: # + # Example 1: okmatrix = np.array([[1, 2], [3, 4]]) # Define a 2x2 matrix print(okmatrix) # Print okmatrix print(okmatrix * 2) # Print a scaled version of okmatrix # + # Example 2: badmatrix = np.array([[1, 2], [3, 4], [5, 6, 7]]) # Define a matrix. 
Note the third row contains 3 elements print(badmatrix) # Print the malformed matrix print(badmatrix * 2) # It is supposed to scale the whole matrix # - # ## Scaling and translating matrices # # Now that you know how to build correct NumPy arrays and matrices, let us see how easy it is to operate with them in Python using the regular algebraic operators like + and -. # # Operations can be performed between arrays and arrays or between arrays and scalars. # Scale by 2 and translate 1 unit the matrix result = okmatrix * 2 + 1 # For each element in the matrix, multiply by 2 and add 1 print(result) # + # Add two sum compatible matrices result1 = okmatrix + okmatrix print(result1) # Subtract two sum compatible matrices. This is called the difference vector result2 = okmatrix - okmatrix print(result2) # - # The product operator `*` when used on arrays or matrices indicates element-wise multiplications. # Do not confuse it with the dot product. result = okmatrix * okmatrix # Multiply each element by itself print(result) # ## Transpose a matrix # # In linear algebra, the transpose of a matrix is an operator that flips a matrix over its diagonal, i.e., the transpose operator switches the row and column indices of the matrix producing another matrix. If the original matrix dimension is n by m, the resulting transposed matrix will be m by n. # # **T** denotes the transpose operations with NumPy matrices. matrix3x2 = np.array([[1, 2], [3, 4], [5, 6]]) # Define a 3x2 matrix print('Original matrix 3 x 2') print(matrix3x2) print('Transposed matrix 2 x 3') print(matrix3x2.T) # However, note that the transpose operation does not affect 1D arrays. nparray = np.array([1, 2, 3, 4]) # Define an array print('Original array') print(nparray) print('Transposed array') print(nparray.T) # perhaps in this case you wanted to do: nparray = np.array([[1, 2, 3, 4]]) # Define a 1 x 4 matrix. Note the 2 level of square brackets print('Original array') print(nparray) print('Transposed array') print(nparray.T) # ## Get the norm of a nparray or matrix # # In linear algebra, the norm of an n-dimensional vector $\vec a$ is defined as: # # $$ norm(\vec a) = ||\vec a|| = \sqrt {\sum_{i=1}^{n} a_i ^ 2}$$ # # Calculating the norm of vector or even of a matrix is a general operation when dealing with data. Numpy has a set of functions for linear algebra in the subpackage **linalg**, including the **norm** function. Let us see how to get the norm a given array or matrix: # + nparray1 = np.array([1, 2, 3, 4]) # Define an array norm1 = np.linalg.norm(nparray1) nparray2 = np.array([[1, 2], [3, 4]]) # Define a 2 x 2 matrix. Note the 2 level of square brackets norm2 = np.linalg.norm(nparray2) print(norm1) print(norm2) # - # Note that without any other parameter, the norm function treats the matrix as being just an array of numbers. # However, it is possible to get the norm by rows or by columns. The **axis** parameter controls the form of the operation: # * **axis=0** means get the norm of each column # * **axis=1** means get the norm of each row. # + nparray2 = np.array([[1, 1], [2, 2], [3, 3]]) # Define a 3 x 2 matrix. normByCols = np.linalg.norm(nparray2, axis=0) # Get the norm for each column. Returns 2 elements normByRows = np.linalg.norm(nparray2, axis=1) # get the norm for each row. Returns 3 elements print(normByCols) print(normByRows) # - # However, there are more ways to get the norm of a matrix in Python. # For that, let us see all the different ways of defining the dot product between 2 arrays. 
# ## The dot product between arrays: All the flavors # # The dot product or scalar product or inner product between two vectors $\vec a$ and $\vec b$ of the same size is defined as: # $$\vec a \cdot \vec b = \sum_{i=1}^{n} a_i b_i$$ # # The dot product takes two vectors and returns a single number. # + nparray1 = np.array([0, 1, 2, 3]) # Define an array nparray2 = np.array([4, 5, 6, 7]) # Define an array flavor1 = np.dot(nparray1, nparray2) # Recommended way print(flavor1) flavor2 = np.sum(nparray1 * nparray2) # Ok way print(flavor2) flavor3 = nparray1 @ nparray2 # Geeks way print(flavor3) # As you never should do: # Noobs way flavor4 = 0 for a, b in zip(nparray1, nparray2): flavor4 += a * b print(flavor4) # - # **We strongly recommend using np.dot, since it is the only method that accepts arrays and lists without problems** # + norm1 = np.dot(np.array([1, 2]), np.array([3, 4])) # Dot product on nparrays norm2 = np.dot([1, 2], [3, 4]) # Dot product on python lists print(norm1, '=', norm2 ) # - # Finally, note that the norm is the square root of the dot product of the vector with itself. That gives many options to write that function: # # $$ norm(\vec a) = ||\vec a|| = \sqrt {\sum_{i=1}^{n} a_i ^ 2} = \sqrt {a \cdot a}$$ # # ## Sums by rows or columns # # Another general operation performed on matrices is the sum by rows or columns. # Just as we did for the function norm, the **axis** parameter controls the form of the operation: # * **axis=0** means to sum the elements of each column together. # * **axis=1** means to sum the elements of each row together. # + nparray2 = np.array([[1, -1], [2, -2], [3, -3]]) # Define a 3 x 2 matrix. sumByCols = np.sum(nparray2, axis=0) # Get the sum for each column. Returns 2 elements sumByRows = np.sum(nparray2, axis=1) # get the sum for each row. Returns 3 elements np.sum() print('Sum by columns: ') print(sumByCols) print('Sum by rows:') print(sumByRows) # - # ## Get the mean by rows or columns # # As with the sums, one can get the **mean** by rows or columns using the **axis** parameter. Just remember that the mean is the sum of the elements divided by the length of the vector # $$ mean(\vec a) = \frac {{\sum_{i=1}^{n} a_i }}{n}$$ # + nparray2 = np.array([[1, -1], [2, -2], [3, -3]]) # Define a 3 x 2 matrix. Chosen to be a matrix with 0 mean mean = np.mean(nparray2) # Get the mean for the whole matrix meanByCols = np.mean(nparray2, axis=0) # Get the mean for each column. Returns 2 elements meanByRows = np.mean(nparray2, axis=1) # get the mean for each row. Returns 3 elements print('Matrix mean: ') print(mean) print('Mean by columns: ') print(meanByCols) print('Mean by rows:') print(meanByRows) # - # ## Center the columns of a matrix # # Centering the attributes of a data matrix is another essential preprocessing step. Centering a matrix means to remove the column mean to each element inside the column. The sum by columns of a centered matrix is always 0. # # With NumPy, this process is as simple as this: # + nparray2 = np.array([[1, 1], [2, 2], [3, 3]]) # Define a 3 x 2 matrix. nparrayCentered = nparray2 - np.mean(nparray2, axis=0) # Remove the mean for each column print('Original matrix') print(nparray2) print('Centered by columns matrix') print(nparrayCentered) print('New mean by column') print(nparrayCentered.mean(axis=0)) # - # **Warning:** This process does not apply for row centering. In such cases, consider transposing the matrix, centering by columns, and then transpose back the result. 
# # See the example below:

# +
nparray2 = np.array([[1, 3], [2, 4], [3, 5]]) # Define a 3 x 2 matrix.

nparrayCentered = nparray2.T - np.mean(nparray2, axis=1) # Remove the mean for each row
nparrayCentered = nparrayCentered.T # Transpose back the result

print('Original matrix')
print(nparray2)
print('Centered by rows matrix')
print(nparrayCentered)

print('New mean by rows')
print(nparrayCentered.mean(axis=1))
# -

# Note that some operations can be performed using static functions like `np.sum()` or `np.mean()`, or by using the inner functions of the array

# +
nparray2 = np.array([[1, 3], [2, 4], [3, 5]]) # Define a 3 x 2 matrix.

mean1 = np.mean(nparray2) # Static way
mean2 = nparray2.mean()   # Dynamic way

print(mean1, ' == ', mean2)
# -

# Even if they are equivalent, we recommend always using the static way.
#
# **Congratulations! You have successfully reviewed vector and matrix operations with Numpy!**
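# ## Appendix: the norm as a dot product
#
# Earlier we noted that the norm is the square root of the dot product of a vector with itself,
# which gives several equivalent ways to write that function. A small sketch of those options,
# using only functions already shown in this notebook:

# +
nparray1 = np.array([1, 2, 3, 4]) # Define an array

normOption1 = np.linalg.norm(nparray1)            # Using the linalg subpackage
normOption2 = np.sqrt(np.dot(nparray1, nparray1)) # Square root of the dot product with itself
normOption3 = np.sqrt(np.sum(nparray1 ** 2))      # Square root of the sum of squares

print(normOption1, '==', normOption2, '==', normOption3)
# -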
NLP_C1_W3_lecture_nb_01.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: xaitools # language: python # name: xaitools # --- # # ASE2021 Hands-on Exercise # # Below are interactive hands-on exercises for model-agnostic techniques for generating local explanations. # First, we need to load necesarry libraries as well as preparing datasets. # # + ## Load Data and preparing datasets # Import for Load Data from os import listdir from os.path import isfile, join import pandas as pd # Import for Split Data into Training and Testing Samples from sklearn.model_selection import train_test_split train_dataset = pd.read_csv(("../../datasets/lucene-2.9.0.csv"), index_col = 'File') test_dataset = pd.read_csv(("../../datasets/lucene-3.0.0.csv"), index_col = 'File') outcome = 'RealBug' features = ['OWN_COMMIT', 'Added_lines', 'CountClassCoupled', 'AvgLine', 'RatioCommentToCode'] # commits - # of commits that modify the file of interest # Added lines - # of added lines of code # Count class coupled - # of classes that interact or couple with the class of interest # LOC - # of lines of code # RatioCommentToCode - The ratio of lines of comments to lines of code # process outcome to 0 and 1 train_dataset[outcome] = pd.Categorical(train_dataset[outcome]) train_dataset[outcome] = train_dataset[outcome].cat.codes test_dataset[outcome] = pd.Categorical(test_dataset[outcome]) test_dataset[outcome] = test_dataset[outcome].cat.codes X_train = train_dataset.loc[:, features] X_test = test_dataset.loc[:, features] y_train = train_dataset.loc[:, outcome] y_test = test_dataset.loc[:, outcome] class_labels = ['Clean', 'Defective'] X_train.columns = features X_test.columns = features training_data = pd.concat([X_train, y_train], axis=1) testing_data = pd.concat([X_test, y_test], axis=1) # - # Then, we construct a Random Forests model as a predictive model to be explained. # # **(1) Please construct a Random Forests model using the code cell below.** # # # `````{admonition} Tips # :class: tip # ```` # # our_rf_model = RandomForestClassifier(random_state=0) # our_rf_model.fit(X_train, y_train) # # ```` # ````` # + from sklearn.ensemble import RandomForestClassifier # Please fit your Random Forests model here! # - # ## LIME # # **LIME** (i.e., Local Interpretable Model-agnostic # Explanations) {cite}`ribeiro2016should` is a model-agnostic technique that # mimics the behaviour of the black-box model to generate the explanations # of the predictions of the black-box model. Given a black-box model and # an instance to explain, LIME performs 4 key steps to generate an # instance explanation as follows: # # - First, LIME randomly generates instances surrounding the instance of # interest. # # - Second, LIME uses the black-box model to generate predictions of the # generated random instances. # # - Third, LIME constructs a local regression model using the generated # random instances and their generated predictions from the black-box # model. # # - Finally, the coefficients of the regression model indicate the # contribution of each metric on the prediction of the instance of # interest according to the black-box model. 
# # **(2) Please use LIME to explain the prediction of *DocumentsWriter.java* that is generated from your Random Forests model.** # # `````{admonition} Tips # :class: tip # ```` # # # LIME Step 1 - Construct an explainer # our_lime_explainer = lime.lime_tabular.LimeTabularExplainer( # training_data = X_train.values, # mode = 'classification', # training_labels = y_train, # feature_names = features, # class_names = class_labels, # discretize_continuous = True) # # # LIME Step 2 - Use the constructed explainer with the predict function # # of your predictive model to explain any instance # lime_local_explanation_of_an_instance = lime_explainer.explain_instance( # data_row = X_test.loc['FileName.py', :], # predict_fn = our_rf_model.predict_proba, # num_features = 5, # top_labels = 1) # # # Please use the code below to visualise the generated LIME explanation. # lime_local_explanation_of_an_instance.show_in_notebook() # # ```` # ````` # + tags=[] # Import for LIME import lime import lime.lime_tabular file_to_be_explained = 'src/java/org/apache/lucene/index/DocumentsWriter.java' print(f'Explaining {file_to_be_explained} with LIME') # LIME Step 1 - Construct an explainer # LIME Step 2 - Use the constructed explainer with the predict function of your predictive model to explain any instance # visualise the generated LIME explanation # - # ## SHAP # # **SHAP** (Shapley values) {cite}`lundberg2018consistentshap` is a model-agnostic technique that generate the explanations of the black-box model based on game theory. # # # **(2) Please use LIME to explain the prediction of *DocumentsWriter.java* that is generated from your Random Forests model.** # # `````{admonition} Tips # :class: tip # ```` # # # SHAP Step 1 - Construct an explainer with the predict function # # of your predictive model # our_shap_explainer = shap.KernelExplainer(our_rf_model.predict, X_test) # # # SHAP Step 2 - Generate the SHAP explanation of an instance to be explained # shap_explanations_of_an_instance = our_shap_explainer.shap_values(X_test.iloc[file_to_be_explained_idx, :]) # # # Please use the code below to visualise the generated SHAP explanation (Force plot). # shap.initjs() # shap.force_plot(our_shap_explainer.expected_value, # shap_explanations_of_instances, # X_test.iloc[file_to_be_explained_idx,:]) # # ```` # ````` # + tags=[] # Import libraries for SHAP import subprocess import sys import importlib import numpy import shap file_to_be_explained = 'src/java/org/apache/lucene/index/DocumentsWriter.java' file_to_be_explained_idx = list(X_test.index).index(file_to_be_explained) # SHAP Step 1 - Construct an explainer with the predict function # SHAP Step 2 - Generate the SHAP explanation of an instance to be explained # visualise the generated SHAP explanation # - # ## PyExplainer # # **PyExplainer** {cite}`pornprasit2021pyexplainer` is a rule-based model-agnostic technique that utilises a local rule-based regression model to learn the associations between the characteristics of the synthetic instances and the predictions from the black-box model. 
Given a black-box model and an instance to explain, PyExplainer performs four key steps to generate an instance explanation as follows: # # - First, PyExplainer generates synthetic neighbors around the instance to be explained using the crossover # and mutation techniques # # - Second, PyExplainer obtains the predictions of the synthetic neighbors from the black-box model # # - Third, PyExplainer builds a local rule-based regression model # # - Finally, PyExplainer generates an explanation from the local model for the instance to be explained # # **(3) Please use PyExplainer to explain the prediction of *DocumentsWriter.java* that is generated from your Random Forests model.** # # `````{admonition} Tips # :class: tip # ```` # import numpy as np # np.random.seed(0) # # # PyExplainer Step 1 - Construct a PyExplainer # our_pyexplainer = PyExplainer(X_train = X_train, # y_train = y_train, # indep = X_train.columns, # dep = outcome, # blackbox_model = rf_model) # # # PyExplainer Step 2 - Generate the rule-based explanation of an instance to be explained # pyexplainer_explanation_of_an_instance = our_pyexplainer.explain( # X_explain = X_test.loc[file_to_be_explained,:].to_frame().transpose(), # y_explain = pd.Series(bool(y_test.loc[file_to_be_explained]), # index = [file_to_be_explained], # name = outcome), # search_function = 'crossoverinterpolation', # max_iter=1000, # max_rules=20, # random_state=0, # reuse_local_model=True) # # # Please use the code below to visualise the generated PyExplainer explanation (What-If interactive visualisation). # our_pyexplainer.visualise(pyexplainer_explanation_of_an_instance, title="Why this file is defect-introducing ?") # # ```` # ````` # + # Import for PyExplainer from pyexplainer.pyexplainer_pyexplainer import PyExplainer file_to_be_explained = 'src/java/org/apache/lucene/index/DocumentsWriter.java' # PyExplainer Step 1 - Construct a PyExplainer # PyExplainer Step 2 - Generate the rule-based explanation of an instance to be explained # visualise the generated rule-based PyExplainer explanation # - # # All of the above explanations are the property-contrast explanation within a file (https://xai4se.github.io/xai/theory-of-explanations.html). # In fact, model-agnostic techniques can be used to generate other types of explanations, e.g., Object-contrast (i.e., the differences of explanations between two objects). # # **(4) Please use LIME to generate the object-contrast explanations between *DocumentsWriter.java* and *TestStringIntern.java*.** # # + # Import for LIME import lime import lime.lime_tabular file_to_be_explained = 'src/java/org/apache/lucene/index/DocumentsWriter.java' another_file_to_be_explained = 'src/test/org/apache/lucene/util/TestStringIntern.java' print(f'Generating the object-contrast explanations between {file_to_be_explained} and {another_file_to_be_explained} with LIME') # LIME Step 1 - Construct an explainer # LIME Step 2 - Use the constructed explainer with the predict function of your predictive model to explain the two instances # visualise the generated LIME explanation - (DocumentsWriter.java) # - # visualise the generated LIME explanation - (TestStringIntern.java)
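# ## A possible sketch for exercise (4)
#
# The cell below is one possible way to finish exercise (4). It is only a hedged sketch that follows
# the same LIME pattern as the Tips above, and it assumes the Random Forests model from step (1) is
# named `our_rf_model`; rename accordingly if you used a different variable.

# +
# LIME Step 1 - Construct an explainer
our_lime_explainer = lime.lime_tabular.LimeTabularExplainer(
                            training_data = X_train.values,
                            mode = 'classification',
                            training_labels = y_train,
                            feature_names = features,
                            class_names = class_labels,
                            discretize_continuous = True)

# LIME Step 2 - Explain both files so their explanations can be contrasted
lime_explanation_file1 = our_lime_explainer.explain_instance(
                            data_row = X_test.loc[file_to_be_explained, :],
                            predict_fn = our_rf_model.predict_proba,
                            num_features = 5,
                            top_labels = 1)

lime_explanation_file2 = our_lime_explainer.explain_instance(
                            data_row = X_test.loc[another_file_to_be_explained, :],
                            predict_fn = our_rf_model.predict_proba,
                            num_features = 5,
                            top_labels = 1)

# visualise the generated LIME explanation - (DocumentsWriter.java)
lime_explanation_file1.show_in_notebook()

# visualise the generated LIME explanation - (TestStringIntern.java)
lime_explanation_file2.show_in_notebook()
# -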
docs/tutorials/hands-on-exercise.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8.12 ('nlp') # language: python # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/"} id="2tL2VLdonI2u" outputId="de903734-db10-469f-9893-6bca8ac4b457" from transformers import LEDTokenizerFast import pandas as pd import numpy as np from tqdm import tqdm # + colab={"base_uri": "https://localhost:8080/", "height": 177, "referenced_widgets": ["fa88bc2036cd4d37b3f09e485213c515", "e4aec2c098714060b8e0130996356c2b", "350e29df101745a7a0179f331da39e01", "a63dbef366df4a8588b1d6f3f4319ec4", "08b6f4d2405147dfb2d811002712ec17", "e01659e10c8a47ccb0f50bb271064bce", "da6a4d1aa7664d93b65ab4c8476af8f0", "432f8874f5c64d35980a2ca797eccf11", "2c680016de914adb8a4f8a6c2f183958", "7d6f181d600e41d693f11c638496f764", "<KEY>", "2abd35ced4b24d6a81ac279251aca887", "101cc3e982204e17bc3a73d12d2d5aa0", "<KEY>", "41a4f2205beb408e8bff841fce7745ca", "<KEY>", "<KEY>", "b93e665dc5c743168f1daba7bb69adc3", "0b0d1e9cae504edab187e84ad1d67546", "<KEY>", "cf3eda6f63b64894993bad768bce0b54", "8548bc7529f94fa0ad4dd4a28dd114f2", "a878d0ccffd54f3bbe5b6eb0b7e355b9", "fff76fb1848e477db0fb853d5e1d5a01", "<KEY>", "b4a9c09366c6445790a0b5135afc31d8", "c5c5b51a86b848699e17188936d8f5e7", "<KEY>", "7cc4ee696127435d82a66aa08b1947d7", "<KEY>", "4fcb00e7a43749dc95ddd3ed62305803", "023a9447caf648cda741f315d840ac22", "<KEY>", "be5a090b899e4f21aca77d8f5acaa63b", "<KEY>", "488a2e3f92074961a62ac007950e0a9f", "<KEY>", "af55d28a19294c97aea400304c3ab0e4", "<KEY>", "24fb92271af046d5af902a45ec1617e0", "fc24428d0ea34ed0b74ee47f33c5f0d0", "3de8fa287b69400f978d3a48def9f644", "2b52603efd6a4071a86d7bdd27455e82", "7deb280e85c6425bbe3a7975dd7856f5", "<KEY>", "<KEY>", "0832381b20344150ac6630493c03c298", "<KEY>", "610c814fa9b54979ac2c9d595788e4e8", "d03c2462eaac405cb21be3a6ae3a10f0", "<KEY>", "761b9e1607cf4926986eb6d4925429cc", "<KEY>", "0a207bf4ea0b44c2a3a09d2c1ec2c17e", "72f3ae0ba804484c80bf9d91e8db0e85"]} id="RDbNnqjrbuh4" outputId="cd04a558-5e3d-424e-be12-4e0f762267f9" tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384") df = pd.read_csv('train-old.csv') # type: pd.DataFrame df["box_center"] = None df["box_length"] = None # + colab={"base_uri": "https://localhost:8080/", "height": 852} id="ueMmUYhEyCAa" outputId="c6e9083d-02a3-480c-c146-76e18b624f2f" df # + colab={"base_uri": "https://localhost:8080/", "height": 49, "referenced_widgets": ["7dc6f79bbf8d4d87ac7de72ae23caa62", "70e1ddf040a34d99a811e173b33411a1", "96ed4fdc6aa540aba815138f4ed1cf33", "0dc07ad83f80486e90cf76d677c30b6d", "f5ed2b53be6b47ba89fb9a7737a19a0d", "5f4eef8e27fe4c51a758ffa368e33cf4", "e956baae3d8d4d95864da5429ea9d8f1", "78c14e0738934ad5adf9e47d3827ddf4", "b8328be6300149eb91b3844fe889dc75", "7bb5ff0f3a7e47e7a6ddbea692dcfb07", "4ccaae9194b64bd48f29d55c5bf56547"]} id="07yHh794n709" outputId="1e691302-425e-4d4f-a635-63c1ef1b1a24" for i in tqdm(range(len(df))): id = df.iloc[i]["id"] discourse_start = int(df.iloc[i]["discourse_start"]) discourse_end = int(df.iloc[i]["discourse_end"]) with open(f'train/{id}.txt', 'r') as f: text = f.read() char_array = np.zeros( (len(text)) ) char_array[discourse_start : discourse_end] = 1 encoding = tokenizer(text, padding='max_length', truncation=True, return_offsets_mapping=True) map_token_to_char = encoding['offset_mapping'] len_sequence = len(map_token_to_char) token_array = np.zeros(len_sequence) for k in range(len(token_array)): token_array[k] = 
char_array[ map_token_to_char[k][0] - 1] pred = token_array.nonzero()[0] center = np.mean(pred) / len_sequence length = pred.shape[0] / len_sequence df.at[i, "box_center"] = center df.at[i, "box_length"] = length # + colab={"base_uri": "https://localhost:8080/", "height": 852} id="FWOmLK0JsU7X" outputId="a8c45caa-664a-4c1d-d10e-fe659d5090d8" df # + id="qjeKhLew-xNC" df.to_csv("train.csv") # - dfna = df.fillna(0) dfna[dfna['box_center'].isna()] dfna.to_csv("train.csv")
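# A quick illustration of what `return_offsets_mapping=True` provides: each token comes with its
# (start, end) character span in the original text, which is what the loop above relies on to
# transfer the character-level discourse mask onto tokens. The sentence below is made up purely
# for demonstration.

# +
sample_text = "This is a short example."
sample_encoding = tokenizer(sample_text, return_offsets_mapping=True)

for token_id, (start, end) in zip(sample_encoding['input_ids'], sample_encoding['offset_mapping']):
    print(tokenizer.convert_ids_to_tokens(token_id), (start, end), repr(sample_text[start:end]))
# -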
input/feedback-prize-2021/add_token_prediction copy.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:anaconda]
#     language: python
#     name: conda-env-anaconda-py
# ---

# +
import os

import numpy as np
from astropy.io import ascii as asc
from astropy.table import Table
from astropy.convolution import convolve, Box1DKernel
from matplotlib import pyplot as plt

from utilities_az import spectroscopy as spec
# -

UV_DIR = '../data/swiftuvot/reduced_default/'
OPTICAL_DIR = '../data/spectra/lco/'

# # UV and Optical Data

optical_fname = 'asassn15oz_20150904_redblu_rest_dustcorrsca.dat'
UV_fname = 'combine_epoch1_rest_dustcorrsca.dat'

uv_tbdata = asc.read(os.path.join(UV_DIR, UV_fname), names=['wave', 'flux'])
optical_tbdata = asc.read(os.path.join(OPTICAL_DIR, optical_fname), names=['wave', 'flux'])

spec_optical = spec.spectrum1d(optical_tbdata['wave'], optical_tbdata['flux'])

tbdata_syn = asc.read('../data/syn++/asassn15oz_uv.txt', names=['wave', 'flux', 'err'])
spec_syn = spec.spectrum1d(tbdata_syn['wave'], tbdata_syn['flux'])

scale_spec_syn = spec.scale_spectra(spec_syn, spec_optical)

plt.plot(optical_tbdata['wave']+60, convolve(optical_tbdata['flux'], Box1DKernel(3)), label='FLOYDS')
plt.plot(uv_tbdata['wave'], convolve(uv_tbdata['flux'], Box1DKernel(7)), label='SWIFT')
plt.plot(scale_spec_syn.wave, scale_spec_syn.flux)
plt.ylim(0E-14, 2E-14)
plt.xlim(2000, 6000)
plt.legend()
notebooks/scale_uv_optical.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Udemy Amazon Reviews extraction import requests # Importing requests to extract content from a url from bs4 import BeautifulSoup as bs import re import nltk from nltk.corpus import stopwords import matplotlib.pyplot as plt from wordcloud import WordCloud # creating empty reviews list tablemate_reviews=[] #forest = ["the","king","of","jungle"] # + # Data extraction - Amazon reviews extraction # - for i in range(1,11): ip=[] url = "https://www.amazon.in/MULTI-TABLE-Proud-Holder-Stronger/product-reviews/B06Y63RX13/ref=cm_cr_getr_d_paging_btm_prev" if i==1: url = url+"_"+str(i)+"?ie=UTF8&reviewerType=all_reviews&pageNumber="+str(i) else: url = url+"_"+str(i)+"?ie=UTF8&reviewerType=all_reviews&pageNumber="+str(i) #print(url) response = requests.get(url) soup = bs(response.content, "html.parser")# creating soup object to iterate over the extracted content reviews = soup.findAll("span", attrs={"class","a-size-base review-text review-text-content"})# Extracting the content under specific for i in range(len(reviews)): tablemate_reviews.append(reviews[i].text) print(tablemate_reviews)# adding the reviews of one page to empty list which in future contai tablemate_reviews # + # Data cleansing and Wordcloud # + # writing reviews in a text file # - with open("tablemate_reviews.txt","w",encoding='utf8') as output: output.write(str(tablemate_reviews)) # Joining all the reviews into single paragraph ip_rev_string = " ".join(tablemate_reviews) ip_rev_string # Removing unwanted symbols incase if exists ip_rev_string = re.sub("[^A-Za-z" "]+"," ",ip_rev_string).lower() ip_rev_string = re.sub("[0-9" "]+"," ",ip_rev_string) ip_rev_string # words that contained in iphone 8 plus reviews ip_reviews_words = ip_rev_string.split(" ") ip_reviews_words # + # stop_words = stopwords.words('english') can use this command if nltk loaded # - with open("C:\\Users\\<NAME>\\stopwords_en.txt") as sw: stopwords = sw.read() stopwords = stopwords.split("\n") temp = ["this","is","awsome","Data","Science"] [i for i in temp if i not in "is"] stopwords ip_reviews_words = [w for w in ip_reviews_words if not w in stopwords] # Joining all the reviews into single paragraph ip_rev_string = " ".join(ip_reviews_words) ip_rev_string # + # Python3 code to find frequency of each word def wordListToFreqDict(wordlist): wordfreq = [wordlist.count(p) for p in wordlist] return dict(list(zip(wordlist,wordfreq))) def sortFreqDict(freqdict): aux = [(freqdict[key], key) for key in freqdict] aux.sort() aux.reverse() return aux str_list = ip_rev_string.split() dictionary = wordListToFreqDict(str_list) sorteddict = sortFreqDict(dictionary) for s in sorteddict: print(str(s)) # - xyz = open('tablematereviews.txt', 'w+') xyz.seek(0) xyz.read() xyz.close() pwd # + # WordCloud can be performed on the string inputs. 
That is the reason we have combined # entire reviews into single paragraph # Simple word cloud # - wordcloud_ip = WordCloud( background_color='black', width=1800, height=1400 ).generate(ip_rev_string) plt.imshow(wordcloud_ip) fig=plt.figure(figsize=(20,10),dpi=300) # positive words # Choose the path for +ve words stored in system with open("C:\\Users\\<NAME>\\Documents\\poswords.txt","r") as pos: poswords = pos.read().split("\n") poswords = poswords[36:] # negative words # Choose the path for -ve words stored in system with open("C:\\Users\\<NAME>\\Documents\\negwords.txt","r") as neg: negwords = neg.read().split("\n") negwords = negwords[37:] # + # negative word cloud # Choosing the only words which are present in negwords ip_neg_in_neg = " ".join([w for w in ip_reviews_words if w in negwords]) wordcloud_neg_in_neg = WordCloud( background_color='black', width=1800, height=1400 ).generate(ip_neg_in_neg) # - plt.figure(1) plt.imshow(wordcloud_neg_in_neg) # + # positive word cloud # Choosing the only words which are present in poswords ip_pos_in_pos = " ".join([w for w in ip_reviews_words if w in poswords]) wordcloud_pos_in_pos = WordCloud( background_color='black', width=1800, height=1400 ).generate(ip_pos_in_pos) # - plt.figure(2) plt.imshow(wordcloud_pos_in_pos) # + # With white background # + # negative word cloud # Choosing the only words which are present in negwords ip_neg_in_neg = " ".join([w for w in ip_reviews_words if w in negwords]) wordcloud_neg_in_neg = WordCloud( background_color='white', width=1800, height=1400 ).generate(ip_neg_in_neg) # - plt.figure(1) plt.imshow(wordcloud_neg_in_neg) # + # positive word cloud # Choosing the only words which are present in poswords ip_pos_in_pos = " ".join([w for w in ip_reviews_words if w in poswords]) wordcloud_pos_in_pos = WordCloud( background_color='white', width=1800, height=1400 ).generate(ip_pos_in_pos) # - plt.figure(2) plt.imshow(wordcloud_pos_in_pos)
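# A minimal alternative sketch for the stopword step, assuming `tablemate_reviews` has been scraped as
# in the cells above: NLTK ships an English stopword list (the option mentioned earlier), and
# `collections.Counter` reproduces the word-frequency ranking of `wordListToFreqDict`/`sortFreqDict`.

# +
import re
from collections import Counter

import nltk
from nltk.corpus import stopwords as nltk_stopwords

nltk.download('stopwords', quiet=True)  # no-op if the corpus is already present
stop_set = set(nltk_stopwords.words('english'))

clean_text = re.sub("[^A-Za-z]+", " ", " ".join(tablemate_reviews)).lower()
review_words = [w for w in clean_text.split() if w and w not in stop_set]

print(Counter(review_words).most_common(20))
# -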
Tablemate Reviews extraction and wordclouds from Python.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Recommendations with MovieTweetings: Most Popular Recommendation # # Now that you have created the necessary columns we will be using throughout the rest of the lesson on creating recommendations, let's get started with the first of our recommendations. # # To get started, read in the libraries and the two datasets you will be using throughout the lesson using the code below. # # + import numpy as np import pandas as pd import matplotlib.pyplot as plt import tests as t # %matplotlib inline # Read in the datasets movies = pd.read_csv('movies_clean.csv') reviews = pd.read_csv('reviews_clean.csv') del movies['Unnamed: 0'] del reviews['Unnamed: 0'] # - # #### Part I: How To Find The Most Popular Movies? # # For this notebook, we have a single task. The task is that no matter the user, we need to provide a list of the recommendations based on simply the most popular items. # # For this task, we will consider what is "most popular" based on the following criteria: # # * A movie with the highest average rating is considered best # * With ties, movies that have more ratings are better # * A movie must have a minimum of 5 ratings to be considered among the best movies # * If movies are tied in their average rating and number of ratings, the ranking is determined by the movie that is the most recent rating # # With these criteria, the goal for this notebook is to take a **user_id** and provide back the **n_top** recommendations. Use the function below as the scaffolding that will be used for all the future recommendations as well. # + movie_rating = reviews.groupby('movie_id')['rating'] avg_rating=movie_rating.mean() # highest average rating num_rating=movie_rating.count() # number of rating last_rating= pd.DataFrame(reviews.groupby('movie_id')['date'].max()) last_rating.columns=["last_rating"] # - rating_count_df = pd.DataFrame({'avg_rating': avg_rating, 'num_rating': num_rating}) rating_count_df = rating_count_df.join(last_rating) # merge with the movies dataset movie_recs = movies.set_index('movie_id').join(rating_count_df) ranked_movies=movie_recs.sort_values(['avg_rating','num_rating','last_rating'],ascending=[False,False,False]) ranked_movies= ranked_movies[ranked_movies['num_rating']>4] ranked_movies.head() # + def create_ranked_df(movies, reviews): ''' INPUT movies - the movies dataframe reviews - the reviews dataframe OUTPUT ranked_movies - a dataframe with movies that are sorted by highest avg rating, more reviews, then time, and must have more than 4 ratings ''' # Pull the average ratings and number of ratings for each movie movie_ratings = reviews.groupby('movie_id')['rating'] avg_ratings = movie_ratings.mean() num_ratings = movie_ratings.count() last_rating = pd.DataFrame(reviews.groupby('movie_id').max()['date']) last_rating.columns = ['last_rating'] # Add Dates rating_count_df = pd.DataFrame({'avg_rating': avg_ratings, 'num_ratings': num_ratings}) rating_count_df = rating_count_df.join(last_rating) # merge with the movies dataset movie_recs = movies.set_index('movie_id').join(rating_count_df) # sort by top avg rating and number of ratings ranked_movies = movie_recs.sort_values(['avg_rating', 'num_ratings', 'last_rating'], ascending=False) # for edge cases - subset the movie list to those with only 5 or more reviews ranked_movies = 
ranked_movies[ranked_movies['num_ratings'] > 4] return ranked_movies def popular_recommendations(user_id, n_top, ranked_movies): ''' INPUT: user_id - the user_id (str) of the individual you are making recommendations for n_top - an integer of the number recommendations you want back ranked_movies - a pandas dataframe of the already ranked movies based on avg rating, count, and time OUTPUT: top_movies - a list of the n_top recommended movies by movie title in order best to worst ''' top_movies = list(ranked_movies['movie'][:n_top]) return top_movies # - # Usint the three criteria above, you should be able to put together the above function. If you feel confident in your solution, check the results of your function against our solution. On the next page, you can see a walkthrough and you can of course get the solution by looking at the solution notebook available in this workspace. # + # Top 20 movies recommended for id 1 ranked_movies = create_ranked_df(movies, reviews) # only run this once - it is not fast recs_20_for_1 = popular_recommendations('1', 20, ranked_movies) # Top 5 movies recommended for id 53968 recs_5_for_53968 = popular_recommendations('53968', 5, ranked_movies) # Top 100 movies recommended for id 70000 recs_100_for_70000 = popular_recommendations('70000', 100, ranked_movies) # Top 35 movies recommended for id 43 recs_35_for_43 = popular_recommendations('43', 35, ranked_movies) # - # Usint the three criteria above, you should be able to put together the above function. If you feel confident in your solution, check the results of your function against our solution. On the next page, you can see a walkthrough and you can of course get the solution by looking at the solution notebook available in this workspace. # + ### You Should Not Need To Modify Anything In This Cell # check 1 assert t.popular_recommendations('1', 20, ranked_movies) == recs_20_for_1, "The first check failed..." # check 2 assert t.popular_recommendations('53968', 5, ranked_movies) == recs_5_for_53968, "The second check failed..." # check 3 assert t.popular_recommendations('70000', 100, ranked_movies) == recs_100_for_70000, "The third check failed..." # check 4 assert t.popular_recommendations('43', 35, ranked_movies) == recs_35_for_43, "The fourth check failed..." print("If you got here, looks like you are good to go! Nice job!") # - # **Notice:** This wasn't the only way we could have determined the "top rated" movies. You can imagine that in keeping track of trending news or trending social events, you would likely want to create a time window from the current time, and then pull the articles in the most recent time frame. There are always going to be some subjective decisions to be made. # # If you find that no one is paying any attention to your most popular recommendations, then it might be time to find a new way to recommend, which is what the next parts of the lesson should prepare us to do! # # ### Part II: Adding Filters # # Now that you have created a function to give back the **n_top** movies, let's make it a bit more robust. Add arguments that will act as filters for the movie **year** and **genre**. # # Use the cells below to adjust your existing function to allow for **year** and **genre** arguments as **lists** of **strings**. Then your ending results are filtered to only movies within the lists of provided years and genres (as `or` conditions). If no list is provided, there should be no filter applied. 
# # You can adjust other necessary inputs as necessary to retrieve the final results you are looking for! def popular_recs_filtered(user_id,n_top,ranked_movies,years=None,genres =None): ''' INPUT: user_id - user id of customer for recommendation n_top - number of movies you want to recommend ranked movies - previously generated movie ranking based on average movie rating, number of ratings, and recency. years - a list of strings for movie release year genres - list of strings for movie genre type OUTPUT: top_movies- recommended n_top movie titles in the order, best to worst. ''' if years is not None: ranked_movies= ranked_movies[ranked_movies.date.isin(years)] if genres is not None: num_genre_match = ranked_movies[genres].sum(axis=1) ranked_movies = ranked_movies.loc[num_genre_match > 0, :] # return a list of recommended movies top_choice=list(ranked_movies['movie'][:n_top]) return top_choice def popular_recs_filtered(user_id, n_top, ranked_movies, years=None, genres=None): ''' INPUT: user_id - the user_id (str) of the individual you are making recommendations for n_top - an integer of the number recommendations you want back ranked_movies - a pandas dataframe of the already ranked movies based on avg rating, count, and time years - a list of strings with years of movies genres - a list of strings with genres of movies OUTPUT: top_movies - a list of the n_top recommended movies by movie title in order best to worst ''' # Filter movies based on year and genre if years is not None: ranked_movies = ranked_movies[ranked_movies['date'].isin(years)] if genres is not None: num_genre_match = ranked_movies[genres].sum(axis=1) ranked_movies = ranked_movies.loc[num_genre_match > 0, :] # create top movies list top_movies = list(ranked_movies['movie'][:n_top]) return top_movies # + # Top 20 movies recommended for id 1 with years=['2015', '2016', '2017', '2018'], genres=['History'] recs_20_for_1_filtered = popular_recs_filtered('1', 20, ranked_movies, years=['2015', '2016', '2017', '2018'], genres=['History']) # Top 5 movies recommended for id 53968 with no genre filter but years=['2015', '2016', '2017', '2018'] recs_5_for_53968_filtered = popular_recs_filtered('53968', 5, ranked_movies, years=['2015', '2016', '2017', '2018']) # Top 100 movies recommended for id 70000 with no year filter but genres=['History', 'News'] recs_100_for_70000_filtered = popular_recs_filtered('70000', 100, ranked_movies, genres=['History', 'News']) # + ### You Should Not Need To Modify Anything In This Cell # check 1 assert t.popular_recs_filtered('1', 20, ranked_movies, years=['2015', '2016', '2017', '2018'], genres=['History']) == recs_20_for_1_filtered, "The first check failed..." # check 2 assert t.popular_recs_filtered('53968', 5, ranked_movies, years=['2015', '2016', '2017', '2018']) == recs_5_for_53968_filtered, "The second check failed..." # check 3 assert t.popular_recs_filtered('70000', 100, ranked_movies, genres=['History', 'News']) == recs_100_for_70000_filtered, "The third check failed..." print("If you got here, looks like you are good to go! Nice job!") # -
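# A quick sanity check on the ranking logic, assuming `ranked_movies` was built with `create_ranked_df`
# above (the column names `avg_rating` and `num_ratings` come from that function): every ranked movie
# should have at least 5 ratings, and the average rating should be non-increasing down the table.

# +
assert (ranked_movies['num_ratings'] >= 5).all()
assert (np.diff(ranked_movies['avg_rating'].values) <= 0).all()
print("Ranking checks passed: min. 5 ratings everywhere and avg_rating sorted high to low.")
# -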
Movie Recommendation/Most_Popular_Recommendations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # UNIVERSIDAD NACIONAL DE CÓRDOBA# # ## Fa.M.A.F – Observatorio Astronómico ## # ### Licenciatura en Astronomía ### # ### Tapia Martina ### # ### Astrometría 2020 ### # ## Práctico N° 3: Bases de Datos ## # ## Introducción ## # # Debido a que las nuevas tecnologías aportan cantidades significativas de datos, se hace imperiosa la necesidad de almacenar los mismos de manera que su acceso sea eficiente y útil. En general, la información de grandes relevamientos, simulaciones numéricas, etc. está organizada en Bases de Datos. El paradigma más utilizado para organizar bases de datos es el de *Base de Datos Relacional*. En este tipo de modelo la información se organiza en tablas, que se relacionan entre sí a partir de una propiedad de los datos. Cada tabla es un conjunto de registros. Existen programas o sistemas de gestión de bases de datos relacionales. Entre los más conocidos, se destacan por ejemplo MySQL, PostgreSQL, Oracle y Microsoft SQL Server. # El **objetivo** de este trabajo es lograr una familiarización con el manejo de las bases de datos, pudiendo extraer información, procesar y analizar la misma con diferentes métodos y principios de probabilidad y estadística. # # ## Conceptos básicos ## # Algunas definiciones importantes para el desarrollo de las actividades de este práctico son: # # - **Base de datos:** Conjunto de datos pertenecientes a un mismo contexto y almacenados sistemáticamente para su registro. # - **Tabla:** Es un conjunto de datos con ciertas características en común. # - **Registro:** Es un objeto único de datos implícitamente estructurados en una tabla. Corresponde a una fila en las tablas. # - **Campo:** Es la mínima unidad de información a la que se puede acceder. Corresponde a una columna de una tabla. # - **Relación Vínculo entre los campos de distintas tablas:** La información está organizada en tablas, pero se puede reunir usando vínculos. # # ### SQL: Structured Query Languaje ### # # SQL (en español: lenguaje de consulta estructurada) es un lenguaje de dominio específico utilizado en programación, diseñado para administrar, y recuperar información de sistemas de gestión de bases de datos relacionales. Una de sus principales características es el manejo del álgebra y el cálculo relacional para efectuar consultas con el fin de recuperar, de forma sencilla, información de bases de datos, así como realizar cambios en ellas. # # Originalmente basado en el álgebra relacional y en el cálculo relacional, SQL consiste en un lenguaje de definición de datos, un lenguaje de manipulación de datos y un lenguaje de control de datos. El alcance de SQL incluye la inserción de datos, consultas, actualizaciones y borrado, la creación y modificación de esquemas y el control de acceso a los datos. # # El SQL es uno de los lenguajes más utilizados para manipular y acceder a bases de datos. Para extraer información de una base de datos se requiere la parte de manipulación de datos, cuyas instrucciones principales son: # # - **SELECT, FROM, INSERT INTO, WHERE** # - Un **query** básico de SQL tiene la siguiente forma: # ``` # SELECT nombre(s)_de_columna(s) # FROM nombre_de_la_tabla # WHERE nombre_columna operador valor # ``` # donde "operador” puede ser AND, OR, >, <, ==, !=. 
# # ### SDSS: Sloan Digital Sky Survey ### # # El *SDSS* es un proyecto de investigación del espacio mediante imágenes en el espectro visible y de corrimiento al rojo, realizada en un telescopio específico de ángulo amplio y de 2,5 metros situado en el observatorio Apache Point de Nuevo México y comenzada en 2000. # # El nombre proviene de la fundación <NAME>, y pretende cartografiar una cuarta parte del cielo visible, obtener observaciones acerca de 100 millones de objetos y el espectro de un millón de objetos. # # En el año 2006 la exploración entró en una nueva fase, el SDSS-II, extendiendo las observaciones para explorar la estructura y la composición estelar de la Vía Láctea mediante los proyectos SEGUE (sigla del inglés Sloan Extension for Galactic Understanding and Exploration) y Búsqueda de Supernovas Sloan (en inglés Sloan Supernova Survey), el cual busca eventos supernova Ia para medir la distancia de objetos lejanos. # # En lo que sigue se eligió un catálogo de galaxias de la base de datos SDSS como caso de estudio para trabajar con bases de datos en observatorios virtuales. # Para realizar el trabajo se utilizó el *SDSS CasJobs*: un lugar de trabajo en línea para grandes catálogos científicos, diseñado para emular y mejorar el acceso a consultas locales de forma libre en un entorno web. # # Algunas características de esta aplicación incluyen: # # - Ejecución de consultas sincrónicas y asincrónicas, en forma de trabajos "rápidos" y "largos". # - Una consulta 'Historial' que registra las consultas y su estado. # - Una base de datos de usuario personalizada del lado del servidor, denominada 'MyDB', que permite la creación de Tablas/funciones/procedimientos persistentes. # - Intercambio de datos entre usuarios, a través del mecanismo "Grupos". # - Descarga de datos, a través de la extracción de tablas MyDB, en varios formatos. # - Múltiples opciones de interfaz, incluido un cliente de navegador, así como una herramienta de línea de comandos basada en java. # # ## Procedimiento ## # # ### Primeros pasos ### # # Para poder llevar a cabo las actividades de estudio y análisis, antes se realiazaron varios pasos; primero se ingresó al sitio de CasJobs: http://skyservice.pha.jhu.edu/casjobs/default.aspx y se creó una cuenta de ususario. Luego, se estudio el sitio web de manera de identificar los elementos principales de la base de datos: tablas, campos y registros. Además, se exploró la estructura de la base de datos usando Skyserver → Schema Browser. A continuación, en la pestaña Query, se ingresó a "Sample SQL queries”, y se elegió algunos ejemplos para ejecutar y analizar el resultado que se obtenia en cada caso. También, con la ayuda de los ejemplos presentados en 'Sample SQL queries' se pudo comprender el uso de las instrucciones "select”, "from”, "into”, "where” y "join” y el uso de "alias”. # # ### Obtención de la Tabla de datos ### # # Una vez explorado y analizado los ejemplos del sitio web CasJobs, se procedió a obtener una lista de galaxias con las siguientes propiedades: # - clasificación: elíptica/espiral # - magnitudes Petrosian en las bandas u, g, y r # - redshift # # La lista de galaxias se obtuvo con el siguiente *query*: # ``` # SELECT TOP 1000 # s.specObjID, g.spiral, g.elliptical, s.petroMag_u, s.petroMag_r, s.petroMag_g, s.z # FROM SpecPhoto AS s # JOIN zooSpec AS g ON g.specobjid = s.specObjID # WHERE # ((g.spiral = 1) or (g.elliptical = 1)) # ``` # Notar que se colocó 'TOP 1000' para limitar la cantidad de datos de la tabla. 
Otra posibilidad de limitar la cantidad de datos es usar condiciones particulares con 'WHERE'. En este caso, sólo se le pide que muestre las galaxias elipticas y espirales y no cualquier otro tipo morfológico de galaxias. Pero también es posible limitar el rango de magnitudes de cada banda y los valores de redshift. # # ### Importación de datos al Notebook### # # Una vez descargada la tabla de datos desde CasJobs, se procedió a colocarla en el mismo directorio de trabajo donde se creó el notebook de actividades. # Una vez iniciado el notebook lo que se hizo fue importar la tabla de datos con la función 'genfromtx' de Numpy. # Para poder realizar los análisis fue necesario separar los datos de la tabla original en dos grupos: galaxias elípticas y espirales y ésto se realizó con ayuda de np.where. Luego se procedió con las actividades. # #Primero se importan las librerías necesarias para trabajar import numpy as np import matplotlib.pyplot as plt import random import math import seaborn as sns sns.set() import scipy.stats data = np.genfromtxt('muestra1_MartinaTapia.csv',delimiter=',', skip_header = 1) #Se analiza cuales son las filas que tienen en la columna 1 (spiral) un 1 fes = np.where(data[:,1]==1)[0] len(fes) # Se separan los datos originales en un nuevo array que contiene todos los datos de las galaxias espirales y lo llamo sp sp = data[fes, :] #sp[0:10] # Se realiza el mismo procedimiento anterior para las galaxias elípticas feli = np.where(data[:,2]==1)[0] elip = data[feli, :] len(elip) # ## Actividad 1 ## # En la primer actividad con los datos de las galaxias obtenidos es estudiar la distribución de índices de color g-r y u-g para galaxias. Además, se queire determinar si para ambos casos las distribuciones son consistentes. Es decir se quiere ver si las galaxias elípticas y espirales son del mismo color o no. #Indices de color para las espirales id_sp_gr = sp[:,5] - sp[:,4] id_sp_ug = sp[:,3] - sp[:,5] #Indices de color para elipticas id_elip_gr = elip[:,5] - elip[:,4] id_elip_ug = elip[:,3] - elip[:,5] # + # Se grafica la Distribución del índdice de color u-g para los dos tipos galácticos. plt.title('Distribución del índice de color u-g para galaxias Elípticas y Espirales') plt.xlabel('Índice u-g') plt.ylabel('Frecuencias') plt.xlim(0,3) plt.hist(id_elip_ug, bins=np.linspace(0,2.0,15), alpha=0.5, label='u-g', color = 'greenyellow') plt.hist(id_sp_ug, bins=np.linspace(0,2.0,15),alpha=0.5,label='g-r') plt.show() # + # Se grafica la Distribución del índdice de color g-r para los dos tipos galácticos. plt.title('Distribución del índice de color g-r para galaxias Elípticas y Espirales') plt.xlabel('Índice g-r') plt.ylabel('Frecuencias') heli = plt.hist(id_elip_gr, bins=np.linspace(0, 2.0,11), alpha=0.5, label='u-g', color = 'greenyellow') hsp = plt.hist(id_sp_gr, bins=np.linspace(0, 2.0,11), alpha=0.5, label='g-r') plt.show() # - # ### Método de Kolmogorov-Smirnov ### # # Si se considera el caso donde las propiedades estadísticas de una muestra obtenidas a partir de experimentos repetidos usando variables aleatorias continuas, se quiere comparar con una función distribución de probabilidades $ F_{X} $. Uno podría, en principio, comparar un histograma y su correspondiente distribución de probabilidades bineada usando el método de chi-cuadrado. Desafortunadamente, el bineado es artificial y tiene gran influencia en los resultados. Consecuentemente, el método KS es más útil ya que no requiere de ningún bineado. 
# # El método compara funciones distribución $ F_{X} $ con funciones de distribución empíricas $ F_{\hat{X}} $ . Uno podría elegir diferentes maneras para comparar las distribuciones, por ejemplo, calcular el área entre las curvas $ F_{X} $ y $ F_{\hat{X}} $. El método KS eligió una simple medición: definir el valor máximo del modulo de la diferencia entre dos funciones de distribución acumuladas. Es decir, el estadístico es: # # $$ d_{máx} = máx_{-\infty < x < \infty} |{F_{X}(x) − F_{\hat{X}}(x)}| $$ # # Así mismo, si se quiesieran comparar dos distribuciones acumuladas observadas, el estadístico sería: # # $$ d_{máx} = máx_{-\infty < x < \infty} |{F_{\hat{X_1}}(x) − F_{\hat{X_2}}(x)}| $$ # # Lo que hace útil al método KS es que su distribución, en el caso de la hipótesis nula (datos extraídos de la misma distribución), puede ser calculada, al menos una aproximación,dando la significación de cualquier valor distinto de cero para dmax. # Una característica del método KS es que es invariante bajo reparametrizaciones de la variable x, es decir, se puede comprimir o o alargar el eje x, y la distancia máxima permanecera invariante. # La bondad del método KS se construye usando un valor crítico. Por lo tanto, la hipótesis nula es rechazada a nivel $\alpha $ si # # $$ d^{observ} _{max} > d^{\alpha}_{max} $$ # # donde $ d^{\alpha}_{max} $ se encuentra a patir de: # # $$ P(d_{max} \leq d^{\alpha}_{max}) = 1 − \alpha $$ # # Además, los valores de $d^{\alpha}_{max}$ se extraen a partir de tablas. # # Se procederá ahora a utililizar el método KS para comparar las distribuciones de los índices de color 'u-g' y 'g-r' de las galaxias elipticas y espirales. # # ### Aplicación del método ### # Primero se plantearán las hipótesis, el desarrollo vale para los dos conjuntos de datos, indice 'u-g' y 'g-r'. # # $ H_{0}: $ No hay diferencias entre la distribuciones de índice de color para galaxias elípticas y espirales. # # $ H_{a}: $ Los valores observados de las frecuencias de los índices de color son diferentes para las galaxias elípticas y espirales # # Luego, se determina el Nivel de significación o confianza para realizar la prueba: $\alpha =$ 0,05 # # - Zona de aceptación: Para todo valor de probabilidad mayor que 0.05, se acepta $ H_{0} $ y se rechaza $H_{a}$. # - Zona de rechazo: Para todo valor de probabilidad menor que 0.05, se rechaza $ H_{0} $ y se acepta $H_{a}$. # # Para la aplicación de la prueba estadística se utilizará la función 'ks_2samp' de scipy.stats. La función, calcula el estadístico Kolmogorov-Smirnov en 2 muestras. Es una prueba de dos caras para la hipótesis nula de que 2 muestras independientes se extraen de la misma distribución continua. La hipótesis alternativa puede ser 'bilateral' (predeterminado), 'menor' o 'mayor'. # Lo que devuelve esta ks es el estadístico de prueba 'KS' ( $d^{observ} _{max}$ ) y además el 'valor de p'. # # Una vez obtenido el valor de **p** para cada grupo de datos, es decir para la distribuciones 'u-g' y las distribuciones 'g-r', se compara el mismo con el valor de $\alpha$ y se decide la conclusión correspondiente en cada caso. 
# # A continuación se realizarán los cálculos: # + #Nivel de confianza para el análisis a = 0.05 # + #Para el índice de color 'u-g' se tiene que: KS_ug, p_ug = scipy.stats.ks_2samp(id_elip_ug, id_sp_ug) # - #Se visualizan los valores obtenidos KS_ug, p_ug # + #Para el índice de color 'g-r' se tiene que: KS_gr, p_gr = scipy.stats.ks_2samp(id_elip_gr, id_sp_gr) # - #Se visualizan los valores obtenidos KS_gr, p_gr # + # Ahora se realiza la comparación de cada valor p con alfa p_ug < 0.05 # - p_gr < 0.05 # ### Conclusiones ### # Finalmente se verifica que los valores de p, tanto para el índice de color u-g como para el índice de color g-r, son menores que el nivel de significancia $\alpha$. Es decir que se rechaza la $H_{0}$ en ambos casos a favor de la $H_{a}$. # Con esta prueba se verifica que las galaxias elipticas y espirales no son del mismo color con un nivel de confianza del 95%. # Además de los histogramas realizados en el principio del análisis, se puede ver que las galaxias elípticas tienen valores de u-g y g-r mayores que los de las galaxias espirales lo que quiere decir que su color es en general hacia el rojo. Mientras que con las galaxias espirales pasa lo contrario y parecen ser más azules.
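# As an illustrative cross-check (a minimal sketch, not part of the original assignment): the two-sample
# KS statistic defined above, $d_{max} = \max|F_{\hat{X_1}}(x) - F_{\hat{X_2}}(x)|$, can be computed
# directly from the empirical cumulative distributions and compared with the value returned by
# `scipy.stats.ks_2samp` for the u-g colour index.

# +
def ks_statistic(sample1, sample2):
    # evaluate both empirical CDFs at the pooled sample points
    data_all = np.sort(np.concatenate([sample1, sample2]))
    cdf1 = np.searchsorted(np.sort(sample1), data_all, side='right') / len(sample1)
    cdf2 = np.searchsorted(np.sort(sample2), data_all, side='right') / len(sample2)
    return np.max(np.abs(cdf1 - cdf2))

print(ks_statistic(id_elip_ug, id_sp_ug), KS_ug)  # the two values should agree
# -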
Ejercicio2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np from matplotlib import cm import matplotlib.pyplot as plt import cv2 from keras.applications import VGG16 from keras.datasets import cifar10 from keras.models import load_model from keras.utils import to_categorical from keras import backend as K from cleverhans.attacks import FastGradientMethod, LBFGS from cleverhans.utils_keras import KerasModelWrapper from vis.utils import utils from vis.visualization import visualize_saliency, visualize_cam, overlay # - # https://jacobgil.github.io/deeplearning/class-activation-maps # + def overlay(img, heatmap, intensity=0.4): # cv2 uses BGR order img = img[:, :, ::-1].copy() heatmap = cv2.resize(heatmap, img.shape[:2]) heatmap = np.uint8(heatmap * 255) img = np.uint8(img * 255) heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET) overlay = np.uint8(heatmap * intensity + img) plt.imshow(overlay[:,:,::-1]) #overlay(sample_imgs[6], heatmap, intensity=0.3) def grad_cam(model, img, label, conv_layer_idx, process=False, my_process=False): if len(img.shape) == 3: img = np.expand_dims(img, axis=0) elif len(img.shape) != 4: raise ValueError('img not shape (1, w, h, 3)') class_output = model.output[:, label] last_conv_layer = model.layers[conv_layer_idx] grads = K.gradients(class_output, last_conv_layer.output)[0] pooled_grads = K.mean(grads, axis=(0, 1, 2)) #print('Pooled grads:', pooled_grads.shape[0]) iterate = K.function([model.input], [pooled_grads, last_conv_layer.output[0]]) pooled_grads_value, conv_layer_output_value = iterate([img]) for i in range(pooled_grads.shape[0]): # 512 conv_layer_output_value[:, :, i] *= pooled_grads_value[i] # The channel-wise mean of the resulting feature map # is our heatmap of class activation heatmap = np.mean(conv_layer_output_value, axis=-1) if process: #print(heatmap.max()) heatmap = np.maximum(heatmap, 0) heatmap = heatmap / heatmap.max() heatmap = cv2.resize(heatmap, (32, 32)) elif my_process: heatmap = heatmap + abs(heatmap.min()) heatmap = heatmap / heatmap.max() heatmap = cv2.resize(heatmap, (32, 32)) return heatmap def plot_pair(img, adv, model=None): if len(img.shape) != 3 or len(adv.shape) != 3: raise ValueError("Image shape must have len 3") if model: img_pred = np.argmax(model.predict(img.reshape(1, 32, 32, 3))) adv_pred = np.argmax(model.predict(adv.reshape(1, 32, 32, 3))) img_pred = CIFAR10_LABEL_NAMES[img_pred] adv_pred = CIFAR10_LABEL_NAMES[adv_pred] diff = img - adv diff = diff + abs(diff.min()) diff = diff / diff.max() fig, axs = plt.subplots(1, 3, figsize=(10, 9)) axs[0].imshow(img) axs[1].imshow(diff) axs[2].imshow(adv) if model: axs[0].set_title("Original Image" + "\nPred: " + img_pred) axs[1].set_title("Difference") axs[2].set_title("Adversarial Image" + "\nPred: " + adv_pred) else: axs[0].set_title("Original Image") axs[1].set_title("Difference") axs[2].set_title("Adversarial Image") # + (x_train, y_train), (x_test, y_test) = cifar10.load_data() x_train = x_train / 255.0 x_test = x_test / 255.0 y_test_tmp = np.squeeze(y_test) y_train = to_categorical(y_train) y_test = to_categorical(y_test) # - model = load_model('../Saved_models/cifar10_vgg_like_175.h5') model.evaluate(x_train, y_train) model.evaluate(x_test, y_test) # + label_names = {0: 'airplane', 1: 'automobile', 2: 'bird', 3: 'cat', 4: 'deer', 5: 'dog', 6: 'frog', 7: 'horse', 8: 'ship', 9: 'truck'} 
sample_imgs = [] num_classes = 10 f, axs = plt.subplots(2, 5, figsize=(13, 6)) for i, x in enumerate(divmod(i, 5) for i in range(num_classes)): img = x_test[np.argmax((y_test_tmp == i))].copy() sample_imgs.append(img) axs[x].imshow(img) axs[x].set_title(label_names[i]) axs[x].axis('off') sample_imgs = np.array(sample_imgs, dtype='float32') # - np.argmax(model.predict(sample_imgs), axis=0) model.layers[19:27] model.layers[20] # + i = 5 last_conv_idx = 20 heatmap = grad_cam(model, sample_imgs[i], i, last_conv_idx, process=True) plt.matshow(heatmap) # - heatmap = cv2.resize(heatmap, (32, 32)) plt.imshow(sample_imgs[i]) plt.imshow(heatmap, cmap='gray', alpha=0.5) f, axs = plt.subplots(2, 5, figsize=(13, 6)) for i, x in enumerate(divmod(i, 5) for i in range(num_classes)): img = sample_imgs[i] hm = grad_cam(model, img, i, last_conv_idx, process=True) axs[x].imshow(img) axs[x].imshow(hm, cmap='gray', alpha=0.5) axs[x].set_title(label_names[i]) axs[x].axis('off') K.set_learning_phase(0) ch_model = KerasModelWrapper(model) fgsm = FastGradientMethod(ch_model, sess=K.get_session()) # eps = 0.025 fgsm_params = {'eps': 0.025, 'clip_min': 0, 'clip_max': 1} adv = fgsm.generate_np(sample_imgs, **fgsm_params) pred = np.argmax(model.predict(adv), axis=0) print(pred) f, axs = plt.subplots(2, 5, figsize=(13, 6)) for i, x in enumerate(divmod(i, 5) for i in range(num_classes)): img = adv[i] #hm = grad_cam(model, img, i, last_conv_idx, process=True) axs[x].imshow(img) #axs[x].imshow(hm, cmap='gray', alpha=0.5) axs[x].set_title('True Label: ' + str(label_names[i]) + '\n' + 'Predicted Label: ' + str(label_names[pred[i]])) axs[x].axis('off') f, axs = plt.subplots(2, 5, figsize=(13, 6)) f.suptitle('Grad-CAMs for clean image') for i, x in enumerate(divmod(i, 5) for i in range(num_classes)): hm = grad_cam(model, sample_imgs[5], i, last_conv_idx, process=True) axs[x].matshow(hm, cmap='jet') #axs[x].imshow(hm, cmap='gray', alpha=0.5) axs[x].set_title(label_names[i]) axs[x].axis('off') # hm = grad_cam(model, sample_imgs[5], 8, last_conv_idx, my_process=True) # plt.imshow(hm, cmap='jet') f, axs = plt.subplots(2, 5, figsize=(13, 6)) f.suptitle('Grad-CAMs for clean image') for i, x in enumerate(divmod(i, 5) for i in range(num_classes)): hm = grad_cam(model, sample_imgs[5], i, last_conv_idx, my_process=True) axs[x].matshow(hm, cmap='jet') #axs[x].imshow(hm, cmap='gray', alpha=0.5) axs[x].set_title(label_names[i]) axs[x].axis('off') # hm = grad_cam(model, sample_imgs[5], 8, last_conv_idx, my_process=True) # plt.imshow(hm, cmap='jet') f, axs = plt.subplots(2, 5, figsize=(13, 6)) f.suptitle('Grad-CAMs for adversarial example') for i, x in enumerate(divmod(i, 5) for i in range(num_classes)): hm = grad_cam(model, adv[5], i, last_conv_idx, process=True) axs[x].matshow(hm, cmap='jet') #axs[x].imshow(hm, cmap='gray', alpha=0.5) axs[x].set_title(label_names[i]) axs[x].axis('off') tmp1 = np.vstack((sample_imgs[0], sample_imgs[3])) tmp2 = np.vstack((sample_imgs[4], sample_imgs[5])) grid = np.hstack((tmp1, tmp2)) plt.imshow(grid) grid = cv2.resize(grid, (32, 32)) hm = grad_cam(model, grid, 5, last_conv_idx, process=True) plt.matshow(hm, cmap='jet') tmp1 = np.vstack((sample_imgs[0], sample_imgs[3])) tmp2 = np.vstack((sample_imgs[4], adv[5])) grid = np.hstack((tmp1, tmp2)) grid = cv2.resize(grid, (32, 32)) plt.imshow(grid) hm = grad_cam(model, grid, 5, last_conv_idx, process=True) plt.matshow(hm, cmap='jet') label_names[np.argmax(model.predict(np.expand_dims(grid, axis=0)))]
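# For a side-by-side look (a small sketch that only reuses the helpers defined above), the Grad-CAM for
# the predicted class of the clean class-5 sample and of its adversarial counterpart can be overlaid on
# the images, in the same style as the grids above.

# +
clean_idx = 5
clean_pred = int(np.argmax(model.predict(np.expand_dims(sample_imgs[clean_idx], axis=0))))
adv_pred = int(np.argmax(model.predict(np.expand_dims(adv[clean_idx], axis=0))))

f, axs = plt.subplots(1, 2, figsize=(8, 4))
axs[0].imshow(sample_imgs[clean_idx])
axs[0].imshow(grad_cam(model, sample_imgs[clean_idx], clean_pred, last_conv_idx, process=True),
              cmap='jet', alpha=0.5)
axs[0].set_title('Clean, pred: ' + label_names[clean_pred])
axs[1].imshow(adv[clean_idx])
axs[1].imshow(grad_cam(model, adv[clean_idx], adv_pred, last_conv_idx, process=True),
              cmap='jet', alpha=0.5)
axs[1].set_title('Adversarial, pred: ' + label_names[adv_pred])
for ax in axs:
    ax.axis('off')
# -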
Visualizations/cifar10_attack_vis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Erasmus+ ICCT project (2018-1-SI01-KA203-047081) # Toggle cell visibility from IPython.display import HTML tag = HTML('''<script> code_show=true; function code_toggle() { if (code_show){ $('div.input').hide() } else { $('div.input').show() } code_show = !code_show } $( document ).ready(code_toggle); </script> <sup>Promijeni vidljivost <a href="javascript:code_toggle()">ovdje</a>.</sup>''') display(tag) # Hide the code completely # from IPython.display import HTML # tag = HTML('''<style> # div.input { # display:none; # } # </style>''') # display(tag) # - # %matplotlib notebook import matplotlib.pyplot as plt import numpy as np import sympy as sym import scipy.signal as signal from ipywidgets import widgets, interact import control as cn # ## Geometrijsko mjesto korijena (Root locus) # # Geometrijsko mjesto korijena (Root locus) je graf položaja polova sustava zatvorene petlje u odnosu na određeni parametar (uobičajeno je to pojačanje). Može se pokazati da krivulje počinju u polovima otvorene petlje, a završavaju u nulama otvorene petlje (ili u beskonačnosti). Položaj polova sustava zatvorene petlje daje indikaciju stabilnosti sustava, a ukazuje i na druga svojstva odziva sustava poput prekoračenja, vremena porasta i vremena smirivanja. # # --- # # ### Kako koristiti ovaj interaktivni primjer? # 1. Kliknite na gumb *P0*, *P1*, *I0* ili *I1* za odabir između sljedećih objekata: proporcija nultog, prvog ili drugog reda ili integral nultog ili prvog reda. Prijenosna funkcija objekta P0 je $k_p$ (u ovom primjeru $k_p=2$), objekta P1 $\frac{k_p}{\tau s+1}$ (u ovom primjeru $k_p=1$ and $\tau=2$), objekta I0 $\frac{k_i}{s}$ (u ovom primjeru $k_i=\frac{1}{10}$) i objakta I1 $\frac{k_i}{s(\tau s +1}$ (u ovom primjeru $k_i=1$ i $\tau=10$). # 2. Kliknite na gumb *P*, *PI*, *PD* ili *PID* za odabir između proporcionalnog, proporcionalno-integracijskog, proporcionalno-derivacijskog ili proporcionalno-integracijsko-derivacijskog tipa algoritma upravljanja. # 3. Pomičite klizače da biste promijenili vrijednosti proporcionalnog ($K_p$), integracijskog ($T_i$) i derivacijskog ($T_d$) koeficijenta PID regulacije. # 4. Pomičite klizač $t_{max}$ za promjenu maksimalne vrijednosti vremena na osi x. 
# + A = 10 a=0.1 s, P, I, D = sym.symbols('s, P, I, D') obj = 1/(A*s) PID = P + P/(I*s) + P*D*s#/(a*D*s+1) system = obj*PID/(1+obj*PID) num = [sym.fraction(system.factor())[0].expand().coeff(s, i) for i in reversed(range(1+sym.degree(sym.fraction(system.factor())[0], gen=s)))] den = [sym.fraction(system.factor())[1].expand().coeff(s, i) for i in reversed(range(1+sym.degree(sym.fraction(system.factor())[1], gen=s)))] system_func_open = obj*PID num_open = [sym.fraction(system_func_open.factor())[0].expand().coeff(s, i) for i in reversed(range(1+sym.degree(sym.fraction(system_func_open.factor())[0], gen=s)))] den_open = [sym.fraction(system_func_open.factor())[1].expand().coeff(s, i) for i in reversed(range(1+sym.degree(sym.fraction(system_func_open.factor())[1], gen=s)))] # make figure fig = plt.figure(figsize=(9.8, 4),num='Geometrijsko mjesto korijena') plt.subplots_adjust(wspace=0.3) # add axes ax = fig.add_subplot(121) ax.grid(which='both', axis='both', color='lightgray') ax.set_title('Vremenski odziv') ax.set_xlabel('t [s]') ax.set_ylabel('ulaz, izlaz') rlocus = fig.add_subplot(122) # plot step function and responses (initalisation) input_plot, = ax.plot([],[],'C0', lw=1, label='ulaz') response_plot, = ax.plot([],[], 'C1', lw=2, label='izlaz') ax.legend() rlocus_plot, = rlocus.plot([], [], 'r') plt.show() system_open = None system_close = None def update_plot(KP, TI, TD, Time_span): global num, den, num_open, den_open global system_open, system_close num_temp = [float(i.subs(P,KP).subs(I,TI).subs(D,TD)) for i in num] den_temp = [float(i.subs(P,KP).subs(I,TI).subs(D,TD)) for i in den] system = signal.TransferFunction(num_temp, den_temp) system_close = system num_temp_open = [float(i.subs(P,KP).subs(I,TI).subs(D,TD)) for i in num_open] den_temp_open = [float(i.subs(P,KP).subs(I,TI).subs(D,TD)) for i in den_open] system_open = signal.TransferFunction(num_temp_open, den_temp_open) rlocus.clear() r, k, xlim, ylim = cn.root_locus_modified(system_open, Plot=False) # r, k = cn.root_locus(system_open, Plot=False) #rlocus.scatter(r) #plot closed loop poles and zeros poles = np.roots(system.den) rlocus.plot(np.real(poles), np.imag(poles), 'kx') zeros = np.roots(system.num) if zeros.size > 0: rlocus.plot(np.real(zeros), np.imag(zeros), 'ko', alpha=0.5) # plot open loop poles and zeros poles = np.roots(system_open.den) rlocus.plot(np.real(poles), np.imag(poles), 'x', alpha=0.5) zeros = np.roots(system_open.num) if zeros.size > 0: rlocus.plot(np.real(zeros), np.imag(zeros), 'o') #plot root locus for index, col in enumerate(r.T): rlocus.plot(np.real(col), np.imag(col), 'b', alpha=0.5) rlocus.set_title('Geometrijsko mjesto korijena') rlocus.set_xlabel('Re') rlocus.set_ylabel('Im') rlocus.grid(which='both', axis='both', color='lightgray') rlocus.axhline(linewidth=.3, color='g') rlocus.axvline(linewidth=.3, color='g') rlocus.set_ylim(ylim) rlocus.set_xlim(xlim) time = np.linspace(0, Time_span, 300) u = np.ones_like(time) u[0] = 0 time, response = signal.step(system, T=time) response_plot.set_data(time, response) input_plot.set_data(time, u) ax.set_ylim([min([np.min(u), min(response),-.1]),min(100,max([max(response)*1.05, 1, 1.05*np.max(u)]))]) ax.set_xlim([-0.1,max(time)]) plt.show() controller_ = PID object_ = obj def calc_tf(): global num, den, controller_, object_, num_open, den_open system_func = object_*controller_/(1+object_*controller_) num = [sym.fraction(system_func.factor())[0].expand().coeff(s, i) for i in reversed(range(1+sym.degree(sym.fraction(system_func.factor())[0], gen=s)))] den = 
[sym.fraction(system_func.factor())[1].expand().coeff(s, i) for i in reversed(range(1+sym.degree(sym.fraction(system_func.factor())[1], gen=s)))] system_func_open = object_*controller_ num_open = [sym.fraction(system_func_open.factor())[0].expand().coeff(s, i) for i in reversed(range(1+sym.degree(sym.fraction(system_func_open.factor())[0], gen=s)))] den_open = [sym.fraction(system_func_open.factor())[1].expand().coeff(s, i) for i in reversed(range(1+sym.degree(sym.fraction(system_func_open.factor())[1], gen=s)))] update_plot(Kp_widget.value, Ti_widget.value, Td_widget.value, time_span_widget.value) def transfer_func(controller_type): global controller_ proportional = P integral = P/(I*s) differential = P*D*s/(a*D*s+1) if controller_type =='P': controller_func = proportional Kp_widget.disabled=False Ti_widget.disabled=True Td_widget.disabled=True elif controller_type =='PI': controller_func = proportional+integral Kp_widget.disabled=False Ti_widget.disabled=False Td_widget.disabled=True elif controller_type == 'PD': controller_func = proportional+differential Kp_widget.disabled=False Ti_widget.disabled=True Td_widget.disabled=False else: controller_func = proportional+integral+differential Kp_widget.disabled=False Ti_widget.disabled=False Td_widget.disabled=False controller_ = controller_func calc_tf() def transfer_func_obj(object_type): global object_ if object_type == 'P0': object_ = 2 elif object_type == 'P1': object_ = 1/(2*s+1) elif object_type == 'I0': object_ = 1/(10*s) elif object_type == 'I1': object_ = 1/(s*(10*s+1)) calc_tf() style = {'description_width': 'initial'} def buttons_controller_clicked(event): controller = buttons_controller.options[buttons_controller.index] transfer_func(controller) buttons_controller = widgets.ToggleButtons( options=['P', 'PI', 'PD', 'PID'], description='Odaberite tip algoritma upravljanja:', disabled=False, style=style) buttons_controller.observe(buttons_controller_clicked) def buttons_object_clicked(event): object_ = buttons_object.options[buttons_object.index] transfer_func_obj(object_) buttons_object = widgets.ToggleButtons( options=['P0', 'P1', 'I0', 'I1'], description='Odaberite objekt:', disabled=False, style=style) buttons_object.observe(buttons_object_clicked) Kp_widget = widgets.FloatLogSlider(value=.5,min=-3,max=2.1,step=.001,description=r'\(K_p\)', disabled=False,continuous_update=True,orientation='horizontal',readout=True,readout_format='.3f') Ti_widget = widgets.FloatLogSlider(value=1.,min=-3,max=1.8,step=.001,description=r'\(T_{i} \)', disabled=False,continuous_update=True,orientation='horizontal',readout=True,readout_format='.3f') Td_widget = widgets.FloatLogSlider(value=1.,min=-3,max=1.8,step=.001,description=r'\(T_{d} \)', disabled=False,continuous_update=True,orientation='horizontal',readout=True,readout_format='.3f') time_span_widget = widgets.FloatSlider(value=10.,min=.5,max=50.,step=0.1,description=r'\(t_{max} \)', disabled=False,continuous_update=True,orientation='horizontal',readout=True,readout_format='.1f') transfer_func(buttons_controller.options[buttons_controller.index]) transfer_func_obj(buttons_object.options[buttons_object.index]) display(buttons_object) display(buttons_controller) interact(update_plot, KP=Kp_widget, TI=Ti_widget, TD=Td_widget, Time_span=time_span_widget);
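# As a minimal standalone illustration of the same idea (independent of the widget code above): for the
# I1 object 1/(s*(10*s + 1)) with a pure P controller of gain K, the closed-loop characteristic
# polynomial is 10*s^2 + s + K, so sweeping K and plotting its roots traces the root-locus branches.

# +
gains = np.linspace(0.001, 2.0, 400)
pole_sets = np.array([np.roots([10.0, 1.0, K]) for K in gains])

fig2, ax2 = plt.subplots(figsize=(5, 4))
ax2.plot(pole_sets.real, pole_sets.imag, 'b.', markersize=2)
ax2.axhline(0, linewidth=.3, color='g')
ax2.axvline(0, linewidth=.3, color='g')
ax2.set_xlabel('Re')
ax2.set_ylabel('Im')
ax2.set_title('Closed-loop poles of 1/(s(10s+1)) with P control, K in [0.001, 2]')
plt.show()
# -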
ICCT_hr/examples/02/.ipynb_checkpoints/TD-18-Geometrijsko_mjesto_korijena-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
# ---

# Most examples work across multiple plotting backends; this example is also available for:
#
# * [Matplotlib NYC Taxi Connection](../matplotlib/nyc_taxi_connections.ipynb)

# +
import numpy as np
import networkx as nx
import holoviews as hv

from holoviews import opts
from holoviews import dim
from holoviews.element.graphs import layout_nodes
from bokeh.sampledata.airport_routes import routes, airports

hv.extension('bokeh')
# -

# # Declare data

# +
# Create dataset indexed by AirportID and with additional value dimensions
airports = hv.Dataset(airports, ['AirportID'], ['Name', 'IATA', 'City'])
source_airports = list(airports.select(City='New York').data.AirportID)

# Add a connection count to the routes, then aggregate and select just the routes connecting to NYC
routes['connections'] = 1
nyc_graph = hv.Graph((routes, airports), ['SourceID', "DestinationID"], ['connections'],
                     label='NYC Airport Connections')\
            .aggregate(function=np.count_nonzero).select(SourceID=source_airports)

# Lay out the graph, weighting edges by the number of connections
np.random.seed(14)
graph = layout_nodes(nyc_graph, layout=nx.layout.fruchterman_reingold_layout, kwargs={'weight': 'connections'})
labels = hv.Labels(graph.nodes, ['x', 'y'], ['IATA', 'City'])
# -

# ## Plot

# +
nyc_labels = labels.select(City='New York').opts(
    text_color='white', yoffset=0.05, text_font_size='16pt')

other_labels = labels[labels['City'] != 'New York'].opts(
    text_color='white', text_font_size='8pt')

cmap = {3697: 'red', 3797: 'blue'}

(graph * nyc_labels * other_labels).opts(
    opts.Graph(bgcolor='gray', width=800, height=800,
               edge_color=dim('SourceID').categorize(cmap, 'gray'),
               node_color=dim('index').categorize(cmap, 'gray'),
               padding=0.1, title='NYC Airport Connections',
               xaxis=None, yaxis=None)
)
examples/gallery/demos/bokeh/nyc_airport_connections.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:torch] * # language: python # name: conda-env-torch-py # --- # + import numpy as np import torch from torch import nn from sklearn import datasets import matplotlib.pyplot as plt w0 = 0.125 b0 = 5. x_range = [-20, 60] def load_dataset(n=150, n_tst=150): np.random.seed(43) def s(x): g = (x - x_range[0]) / (x_range[1] - x_range[0]) return 3 * (0.25 + g**2.) x = (x_range[1] - x_range[0]) * np.random.rand(n) + x_range[0] eps = np.random.randn(n) * s(x) y = (w0 * x * (1. + np.sin(x)) + b0) + eps y = (y - y.mean()) / y.std() idx = np.argsort(x) x = x[idx] y = y[idx] return y[:, None], x[:, None] y, x = load_dataset() # - plt.plot(x,y,'.') plt.show() len(x) X = torch.tensor(x, dtype=torch.float) Y = torch.tensor(y, dtype=torch.float) # - First we&rsquo;ll model a neural network $g_{\theta}(x)$ with maximum likelihood estimation. Where we assume a Gaussian likelihood. # $$\begin{equation} # y \sim \mathcal{N}(g_{\theta}(x), \sigma^2) # \end{equation}$$ # $$ \begin{equation}\hat{\theta}_{\text{MLE}} = \text{argmax}_\theta \prod_i^nP(y_i|\theta) \end{equation}$$ class MaximumLikelihood(nn.Module): def __init__(self): super().__init__() self.out = nn.Sequential( nn.Linear(1, 20), nn.ReLU(), nn.Linear(20, 1) ) def forward(self, x): return self.out(x) epochs = 200 m = MaximumLikelihood() optim = torch.optim.Adam(m.parameters(), lr=0.01) for epoch in range(epochs): optim.zero_grad() y_pred = m(X) loss = (0.5 * (y_pred - Y)**2).mean() loss.backward() optim.step() m.eval() y_estimate = m(X) plt.figure(figsize=(10, 5)) plt.plot(x,y, 'b.', alpha=0.8) plt.plot(x, y_estimate.detach().numpy(), 'r', alpha=0.6) plt.show() # - We are able to predict the expectation of $y$, but we are not able to make a statement about the uncertainty of our predictions. # - In variational inference, we accept that we cannot obtain the true posterior $P(y|x)$, but we try to approximate this distribution with another distribution $Q_{\theta}(y)$, where $\theta$ are the variational parameters. This distribution we call a variational distribution. # - If we choose a factorized (diagonal) Gaussian variational distribution, we define a function $g_{\theta}: x \mapsto \mu, \sigma$. The function $g_{\theta}$ will be a neural network that predicts the variational parameters. # - The total model can thus be described as: # $$ \begin{equation}P(y) = \mathcal{N}(0, 1) \end{equation}$$ # where we set a unit Gaussian prior $P(y)$. # - from now on we will generalize to a notation that is often used. We&rsquo;ll extend $y|x$ to any (latent) stochastic variable $Z$. # -------------------------------- # - Variational inference is done by maximizing the ELBO: # $$ \begin{equation}\text{argmax}_{Z} = E_{Z \sim Q}[\underbrace{\log P(D|Z)}_{\text{likelihood}}] - D_{KL}(Q(Z)||\underbrace{P(Z)}_{\text{prior}}) \label{eq:elbo} \end{equation}$$ # - Let's rewrite this ELBO definition so that it is more clear how we can use it to optimize the model. # $$E_{Z \sim Q}[\log P(D|Z)] + E_{Z \sim Q}[ \frac{P(Z)}{Q(Z)}]dZ$$ # $$E_{Z \sim Q}[\log P(D|Z)] + E_{Z \sim Q}[\log P(Z) - \log Q(Z)]$$ # #### Monte Carlo ELBO and reparameterization trick # - Deriving those expectations maybe not possible, thus we can get estimates of the true expectation by taking samples from $Q(Z)$ and average over those results. 
# - If we start taking samples from $Q(Z)$ we leave the deterministic world, and the gradient can no longer flow through the model. We avoid this problem by reparameterizing the samples from the distribution.
# - Instead of sampling directly from the variational distribution, $z \sim Q(\mu, \sigma^2)$, we sample from a unit Gaussian and recreate samples from the variational distribution. Now the stochasticity of $\epsilon$ is external and will not prevent the flow of gradients.

class VI(nn.Module):
    def __init__(self):
        super().__init__()

        self.q_mu = nn.Sequential(
            nn.Linear(1, 20),
            nn.ReLU(),
            nn.Linear(20, 10),
            nn.ReLU(),
            nn.Linear(10, 1)
        )
        self.q_log_var = nn.Sequential(
            nn.Linear(1, 20),
            nn.ReLU(),
            nn.Linear(20, 10),
            nn.ReLU(),
            nn.Linear(10, 1)
        )

    def reparameterize(self, mu, log_var):
        sigma = torch.exp(0.5 * log_var) + 1e-5
        epsilon = torch.randn_like(sigma)
        return mu + sigma * epsilon

    def forward(self, x):
        mu = self.q_mu(x)
        log_var = self.q_log_var(x)
        return self.reparameterize(mu, log_var), mu, log_var

# $$ \begin{aligned} \log p(\mathbf{y}|\mathbf{X, \mu, \sigma}) &= \sum_{i=1}^N \log N(y_i;\mathbf{\mu,\sigma^2}) \\ &= \sum_{i=1}^N \log \frac{1}{\sqrt{2\pi\sigma^2_e}}\exp \left(-\frac{(y_i - \mathbf{\mu})^2}{2\sigma^2_e}\right) \\ &= -\frac{N}{2}\log 2\pi\sigma^2_e - \sum_{i=1}^N \frac{(y_i-\mathbf{\mu})^2}{2\sigma^2_e} \end{aligned}$$

def ll_gaussian(y, mu, log_var):
    # log-likelihood of a Gaussian
    sigma = torch.exp(0.5 * log_var)
    return -0.5 * torch.log(2 * np.pi * sigma**2) - (1 / (2 * sigma**2)) * (y - mu)**2


def elbo(y_pred, y, mu, log_var):
    # likelihood of observing y given the variational mu and sigma
    likelihood = ll_gaussian(y, mu, log_var)

    # prior probability of y_pred
    log_prior = ll_gaussian(y_pred, 0, torch.log(torch.tensor(1.)))

    # variational probability of y_pred
    log_p_q = ll_gaussian(y_pred, mu, log_var)

    # by taking the mean we approximate the expectation
    return (likelihood + log_prior - log_p_q).mean()


def det_loss(y_pred, y, mu, log_var):
    return -elbo(y_pred, y, mu, log_var)

# +
epochs = 1500

m = VI()
optim = torch.optim.Adam(m.parameters(), lr=0.005)

for epoch in range(epochs):
    optim.zero_grad()
    y_pred, mu, log_var = m(X)
    loss = det_loss(y_pred, Y, mu, log_var)
    loss.backward()
    optim.step()

# +
# draw samples from Q(theta)
with torch.no_grad():
    y_pred = torch.cat([m(X)[0] for _ in range(1000)], dim=1)

# Get some quantiles
q1, mu, q2 = np.quantile(y_pred, [0.05, 0.5, 0.95], axis=1)

plt.figure(figsize=(10, 5))
plt.scatter(X, Y, s=10)
plt.plot(X, mu, 'r', alpha=0.6)
plt.fill_between(X.flatten(), q1, q2, alpha=0.2)
# -

# #### Analytical KL-divergence
# - Above we have implemented the ELBO by sampling from the variational posterior. It turns out that for the KL-divergence term this isn't necessary, as there is an analytical solution:
# $$D_{KL}(Q(Z)||P(Z)) = -\frac{1}{2}\sum_{i=1}^n(1+\log \sigma_i^2 - \mu_i^2 - \sigma_i^2)$$
# - For the likelihood term we implemented the Gaussian log-likelihood; this term can also be replaced with a similar loss function. For a Gaussian likelihood we can use the mean squared error loss.
# - We can simplify the loss function as defined below:
# ```
# def det_loss(y, y_pred, mu, log_var):
#     reconstruction_error = (0.5 * (y - y_pred)**2).sum()
#     kl_divergence = (-0.5 * torch.sum(1 + log_var - mu**2 - log_var.exp()))
#
#     return (reconstruction_error + kl_divergence).sum()
# ```

# #### Aleatoric and epistemic uncertainty
# - In the example above we were able to model the *aleatoric uncertainty*.
# - This is the *inherent variance* in the data, which we have to accept because the underlying data-generating process is stochastic in nature (e.g. rolling dice, or the cards you are dealt in a poker game).
# - Aleatoric uncertainty comes in two flavours: *homoscedastic* and *heteroscedastic*.
# - homoscedastic: for example, in the linear regression model $y = X \beta + \epsilon$ we incorporate $\epsilon$ for the noise in the data. In linear regression, $\epsilon$ does not depend on $X$ and is therefore assumed to be constant.
# - heteroscedastic: if the aleatoric uncertainty does depend on $X$, we speak of heteroscedastic uncertainty.
# - Epistemic uncertainty can be reduced by designing a better model, acquiring more data, etc.
# - In the example above the data has inherent noise that cannot be reduced; when we build a model ensemble using dropout and measure only the *model inconsistency*, the epistemic uncertainty is low, as shown below (because most of the uncertainty in this case is aleatoric).
# <img src=attachment:image.png width=500>
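# The simplified loss sketched in the Analytical KL-divergence section above can be dropped in as-is.
# A runnable version (reusing the `VI` class and training setup from earlier; it should give a fit
# comparable to the Monte-Carlo ELBO version):

# +
def det_loss_analytical(y, y_pred, mu, log_var):
    reconstruction_error = (0.5 * (y - y_pred)**2).sum()
    kl_divergence = -0.5 * torch.sum(1 + log_var - mu**2 - log_var.exp())
    return reconstruction_error + kl_divergence

m_analytical = VI()
optim = torch.optim.Adam(m_analytical.parameters(), lr=0.005)
for epoch in range(1500):
    optim.zero_grad()
    y_pred, mu, log_var = m_analytical(X)
    loss = det_loss_analytical(Y, y_pred, mu, log_var)
    loss.backward()
    optim.step()
# -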
deep_learning/uncertainty-deep-learning-master/06. Aleatoric-epistemic uncertainty (toy example).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # name: python2 # --- import os import getpass from passlib.hash import pbkdf2_sha256 import crypt, getpass, spwd from Crypto.PublicKey import RSA fuliz = os.listdir('/home') fuliz # + #Chabge password for user #pasuz = raw_input('User to change password: ') # + #logtest = getpass.getpass('new password: ') #loghash = pbkdf2_sha256.encrypt(logtest, rounds=200000, salt_size=16) #vercryp = pbkdf2_sha256.verify(logtest, hashez) # + #Enter user to delete. #Even better, user to lock. #delusa = raw_input('User to delete: ') #locusa = raw_input('User to lock: ') #os.system('sudo passwd -l ' + locusa) # + #Read hashed passwords from /etc/shadow #opshad = open('/etc/shadow', 'r') #opshad.read() #opshad.close() #Better to do this with a python module. spwd reads #shadow files done. #Need to getpass and ask for password, comparing to #the password returned from spwd # - pan = raw_input("Enter Username to delete: ") # + #enc_pwd = spwd.getspnam(pan)[1] #if enc_pwd in ["NP", "!", "", None]: # print "user '%s' has no password set" % pan #if enc_pwd in ["LK", "*"]: # print "account is locked" #if enc_pwd == "!!": # print "password has expired" # - gpas = getpass.getpass('Enter Username Password: ') encpass = spwd.getspnam(pan)[1] if crypt.crypt(gpas, encpass) == encpass: print ('True') else: print "incorrect password" # + #print spwd.getspnam(pan)[1] # + #shpa = spwd.getspnam('wcmckee')[1] # + #spwd.getspall() # + #shpa # - # rsa key generated for each user and stored in their # /home/user/.ssh/ folder. Public key is emailed, added to test servers. # + new_key = RSA.generate(2048, e=65537) public_key = new_key.publickey().exportKey("PEM") private_key = new_key.exportKey("PEM") print private_key sapriv = open('/home/wcmckee/.ssh/' + pan, 'w') sapriv.write(private_key) sapriv.close() print public_key papriv = open('/home/wcmckee/.ssh/' + pan + '.pub', 'w') papriv.write(public_key) papriv.close() # + #Spin up digital ocean server, with public key and #user created. # + #import digitalocean # + #tok = ('<KEY>') # + #digid = digitalocean.Manager(token='<KEY>') # + #digid.get_account # + #my_droplets = digid.get_all_droplets() # + #lisdrop = [] # - #for myd in my_droplets: # print myd #lisdrop.append(myd.image) # # lisdrop.append(myd.ip_address) # + #lisdrop # - #droplet = digitalocean.Droplet(token=tok, # name='Example', # region='nyc2', # New York 2 ## image= , # Ubuntu 14.04 x64 # size_slug='512mb', # 512MB # backups=True) # + #droplet.create() # + #dimg = digid.get_all_images() # + #for di in dimg: ## print di # + #opdel = os.listdir('/home/wcmckee/signinlca/deleteusers') # + #opdel # - usrtodel = raw_input('Account to delete: ') locacc = os.system('sudo passwd -l ' + usrtodel) locacc os.system('mv ' + ' ' + '/home/wcmckee/signinlca/usernames/' + usrtodel + ' /home/wcmckee/signinlca/username/deleteusers/')
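# +
# A small usage sketch for the pbkdf2 snippet that is commented out above: hash a throwaway password,
# then verify a right and a wrong guess. The password string here is only an example.
example_hash = pbkdf2_sha256.encrypt('example-password', rounds=200000, salt_size=16)
print pbkdf2_sha256.verify('example-password', example_hash)   # True
print pbkdf2_sha256.verify('wrong-password', example_hash)     # False
# -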
.ipynb_checkpoints/pgguserdel-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [Root] # language: python # name: Python [Root] # --- import myutils as my import pandas as pd import seaborn as sns import numpy as np from sklearn.manifold import TSNE # %matplotlib inline # %load_ext rpy2.ipython df = pd.read_table("./data/glove.6B.100d.txt", delimiter=" ", header=None, index_col=0, quoting=3) df = pd.DataFrame(pd.read_csv("./data/minimal.50d.4f.csv")).set_index("0") lens = (df**2).sum(axis=1).sort_values() dfn = df.div(np.sqrt(lens), axis='index') my.similar(dfn, dfn.loc["war"], 10) my.least_similar(dfn, dfn.loc["sunday"]-dfn.loc["saturday"]) my.similar(dfn, dfn.loc["jealous"]-dfn.loc["envious"]) my.similar(dfn, -dfn.loc["famous"]+dfn.loc["popular"]) from numpy.linalg import norm def dot_norm(a, b): return a.dot(b)/(norm(a)*norm(b)) dot_norm(df.loc["he"] - df.loc["she"], df.loc["man"] - df.loc["woman"]) def superfunkcja (a, b, c, d) : return dot_norm(df.loc[a] - df.loc[b], df.loc[c] - df.loc[d]) superfunkcja("he", "she", "chair", "table") superfunkcja("he", "she", "good", "bad") superfunkcja("envious", "jealous", "good", "bad") superfunkcja("terrorist", "fighter", "good", "bad") superfunkcja("terrorist", "soldier", "good", "bad") superfunkcja("giant", "dwarf", "big", "small") superfunkcja("up", "down", "good", "bad") superfunkcja("warm", "cold", "close", "far") df.loc["chair"].dot(df.loc["love"])/(norm(df.loc["chair"])*norm(df.loc["love"])) df.loc["leaf"].dot(df.loc["belief"])/(norm(df.loc["leaf"])*norm(df.loc["belief"])) df.loc["habit"].dot(df.loc["ventilation"])/(norm(df.loc["habit"])*norm(df.loc["ventilation"])) df.loc["air"].dot(df.loc["triangle"])/(norm(df.loc["air"])*norm(df.loc["triangle"])) df.loc["chair"].dot(df.loc["table"])/(norm(df.loc["chair"])*norm(df.loc["table"])) def simil(a, b): return df.loc[a].dot(df.loc[b])/(norm(df.loc[a])*norm(df.loc[b])) simil("pen", "pencil") simil("car", "bus") simil("violin", "piano") simil("hot", "attractive") simil("theory", "building") simil("close", "warm") simil("past", "back") simil("war", "argument") simil("discussion", "battle") simil("discussion", "cooperation") simil("argument", "cooperation")
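# A small analogy helper (a sketch that only relies on the normalised frame `dfn` built above; whether
# the classic king - man + woman pattern shows up depends on which words this reduced vocabulary contains).

def most_similar(vec, frame, topn=10):
    v = vec / norm(vec)
    sims = frame.values @ v.values  # rows of `frame` are already unit-normalised
    return frame.index[np.argsort(-sims)[:topn]].tolist()

# Example, assuming these words are present in the vocabulary:
# most_similar(dfn.loc["king"] - dfn.loc["man"] + dfn.loc["woman"], dfn)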
notebooks_julia/4_similars.ipynb
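The similarity notebook above relies on a local myutils module and GloVe files that are not included here. As a hedged, self-contained illustration of the same cosine-similarity machinery, the sketch below uses a tiny hypothetical embedding table; the words and numbers are made up.

import numpy as np
import pandas as pd

# Toy 3-dimensional "embeddings" standing in for the GloVe vectors loaded above.
toy = pd.DataFrame(
    [[1.0, 0.2, 0.0],
     [0.9, 0.3, 0.1],
     [0.0, 0.1, 1.0]],
    index=["king", "queen", "banana"],
)

def cosine(a, b):
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

print(cosine(toy.loc["king"], toy.loc["queen"]))   # near 1: similar directions
print(cosine(toy.loc["king"], toy.loc["banana"]))  # near 0: almost orthogonal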
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] editable=true # # Exploratory Data Analysis (EDA): understand the input data # + editable=true import boto3 import configparser # + editable=true config = configparser.ConfigParser() config.read('dwh.cfg') # AWS KEY = config.get('AWS','KEY') SECRET = config.get('AWS','SECRET') # + editable=true s3c = boto3.client('s3', region_name='eu-west-2', aws_access_key_id=KEY, aws_secret_access_key=SECRET ) s3r = boto3.resource('s3', region_name='eu-west-2', aws_access_key_id=KEY, aws_secret_access_key=SECRET ) udacity_bucket = s3r.Bucket("udacity-dend") # + [markdown] editable=true # ### Look into input files for the `song_data` # # $\rightarrow$ Contains JSON files with song metadata (artist_id, artist_latitude, ..., year) # + editable=true for i, obj in enumerate(udacity_bucket.objects.filter(Prefix='song_data')): print(obj) if i > 3: break # + editable=true # print the content of the first file # boto3 s3 resource has no get_object print(s3c.get_object(Bucket='udacity-dend', Key='song_data/A/A/A/TRAAAAK128F9318786.json')\ ['Body'].read().decode('utf-8')[:500]) # + [markdown] editable=true # ### Look into input files for the `log_data` # # $\rightarrow$ Contain JSON files with user activity, ie songs played (with artist, authorisation status, ...., user_id) # + editable=true # list Log Data # boto3 s3 client has no attribute 'Bucket' song_data_bucket = s3r.Bucket("udacity-dend") for i, obj in enumerate(udacity_bucket.objects.filter(Prefix='log_data')): print(obj) if i > 5: break # + editable=true # print the content of the first file print(s3c.get_object(Bucket='udacity-dend', Key='log_data/2018/11/2018-11-01-events.json')\ ['Body'].read().decode('utf-8')[:500]) # + [markdown] editable=true # $\rightarrow$ error in load of `staging_events`: # # 1. check in AWS Redshift Query editor `select * from stl_load_errors` # 2. error "Invalid timestamp format or value [YYYY-MM-DD HH24:MI:SS]" # 3. From sample data above: "ts":1541105830796 is not a timestamp # -> might use BIGINT # -> or adapt LOAD SQL -> chose this, compare sql_queries.py # + [markdown] editable=true # ### Look into input file `log_json_path.json` # # $\rightarrow$ Contains column headers # + editable=true # log_json_path.json # inspired by https://www.slsmk.com/use-boto3-to-open-an-aws-s3-file-directly/ print(s3c.get_object(Bucket='udacity-dend', Key='log_json_path.json')['Body'].read().decode('utf-8')) # + editable=true s3c # + [markdown] editable=true # Get an impression of the song_data size # + editable=true #inspired by https://newbedev.com/how-to-find-size-of-a-folder-inside-an-s3-bucket: top_level_folders = dict() num_files = 0 for key in s3c.list_objects(Bucket='udacity-dend')['Contents']: folder = key['Key'].split('/')[0] if folder == 'song-data': if num_files < 5: print("Key %s in folder %s. %d bytes" % (key['Key'], folder, key['Size'])) num_files += 1 print(num_files) # + [markdown] editable=true # # Develop the print statements in etl.py # + editable=true # Develop print statement in etl.py from sql_queries import copy_table_queries, insert_table_queries for query in insert_table_queries: print(query.strip().split(' ')[2])
EDA_input_data.ipynb
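One practical note on the EDA notebook above: list_objects returns at most 1000 keys per call, so folder-size estimates over song_data can silently truncate. A hedged sketch with a paginator follows; the bucket and prefix names are taken from the notebook, and credentials are assumed to be configured as in dwh.cfg.

import boto3

s3c = boto3.client("s3", region_name="eu-west-2")  # assumes credentials are already configured
paginator = s3c.get_paginator("list_objects_v2")

total_bytes, total_files = 0, 0
for page in paginator.paginate(Bucket="udacity-dend", Prefix="song_data/"):
    for obj in page.get("Contents", []):
        total_bytes += obj["Size"]
        total_files += 1

print("%d files, %.1f MiB" % (total_files, total_bytes / 2**20))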
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Navigation # # --- # # ### 1. Start the Environment # !pip -q install ./python from unityagents import UnityEnvironment import random import torch import numpy as np from collections import deque import matplotlib.pyplot as plt # %matplotlib inline # !pip install torchsummary from torchsummary import summary from dqn_agent import Agent # please do not modify the line below env = UnityEnvironment(file_name="/data/Banana_Linux_NoVis/Banana.x86_64") # Environments contain **_brains_** which are responsible for deciding the actions of their associated agents. Here I check for the first brain available, and set it as the default brain I will be controlling from Python. # get the default brain brain_name = env.brain_names[0] brain = env.brains[brain_name] # ### 2. Examine the State and Action Spaces # + # reset the environment env_info = env.reset(train_mode=True)[brain_name] # number of agents in the environment print('Number of agents:', len(env_info.agents)) # number of actions action_size = brain.vector_action_space_size print('Number of actions:', action_size) # examine the state space state = env_info.vector_observations[0] print('States look like:', state) state_size = len(state) print('States have length:', state_size) # - # ### 3. Take Random Actions in the Environment # + env_info = env.reset(train_mode=True)[brain_name] # reset the environment state = env_info.vector_observations[0] # get the current state score = 0 # initialize the score while True: action = np.random.randint(action_size) # select an action env_info = env.step(action)[brain_name] # send the action to the environment next_state = env_info.vector_observations[0] # get the next state reward = env_info.rewards[0] # get the reward done = env_info.local_done[0] # see if episode has finished score += reward # update the score state = next_state # roll over the state to next time step if done: # exit loop if episode finished break print("Score: {}".format(score)) # - # ### 4. Train the Agent with DQN agent = Agent(state_size=37, action_size=4, seed=0) summary(agent.qnetwork_local, (37,)) def dqn(n_episodes=1000, max_t=1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995): """Deep Q-Learning. 
Params ====== n_episodes (int): maximum number of training episodes max_t (int): maximum number of timesteps per episode eps_start (float): starting value of epsilon, for epsilon-greedy action selection eps_end (float): minimum value of epsilon eps_decay (float): multiplicative factor (per episode) for decreasing epsilon """ scores = [] # list containing scores from each episode scores_window = deque(maxlen=100) # last 100 scores eps = eps_start # initialize epsilon for i_episode in range(1, n_episodes+1): env_info = env.reset(train_mode=True)[brain_name] state = env_info.vector_observations[0] score = 0 for t in range(max_t): action = agent.act(state, eps) env_info = env.step(action)[brain_name] next_state, reward, done = env_info.vector_observations[0], env_info.rewards[0], env_info.local_done[0] agent.step(state, action, reward, next_state, done) state = next_state score += reward if done: break scores_window.append(score) # save most recent score scores.append(score) # save most recent score eps = max(eps_end, eps_decay*eps) # decrease epsilon print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end="") if i_episode % 100 == 0: print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window))) if np.mean(scores_window)>=13.0: print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window))) torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pth') break return scores scores = dqn() # plot the scores fig = plt.figure() ax = fig.add_subplot(111) plt.plot(np.arange(len(scores)), scores) plt.ylabel('Score') plt.xlabel('Episode #') plt.show() # ### 5. Watch a Smart Agent! # load the weights from file agent.qnetwork_local.load_state_dict(torch.load('checkpoint.pth')) num_episodes = 10 scores = [] for i_episode in range(1,num_episodes+1): env_info = env.reset(train_mode=True)[brain_name] # reset the environment state = env_info.vector_observations[0] # get the current state score = 0 # initialize the score while True: action = agent.act(state) # select an action env_info = env.step(action)[brain_name] # send the action to the environment next_state = env_info.vector_observations[0] # get the next state reward = env_info.rewards[0] # get the reward done = env_info.local_done[0] # see if episode has finished score += reward # update the score state = next_state # roll over the state to next time step if done: scores.append(score) break # plot the scores fig = plt.figure() ax = fig.add_subplot(111) plt.plot(np.arange(len(scores)), scores) plt.ylabel('Score') plt.xlabel('Episode #') ax.set_ylim(bottom=0) plt.show()
Navigation.ipynb
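The dqn() loop in the Navigation notebook above decays epsilon multiplicatively with a floor at eps_end. A small stand-alone sketch of that schedule (no Unity environment needed) makes the exploration/exploitation trade-off easy to see; the constants are the defaults from the function signature.

import matplotlib.pyplot as plt

eps_start, eps_end, eps_decay, n_episodes = 1.0, 0.01, 0.995, 1000

eps, schedule = eps_start, []
for _ in range(n_episodes):
    schedule.append(eps)
    eps = max(eps_end, eps_decay * eps)   # same update as in dqn()

plt.plot(schedule)
plt.xlabel("Episode")
plt.ylabel("Epsilon")
plt.title("Epsilon-greedy schedule used during training")
plt.show()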
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Introduction # # Welcome! In this set of tutorials you will learn about image classification using quantized neural networks (QNNs), and what kind of computations take place. # # ## What We Are Trying To Do # # For the purposes of this tutorial, we will view the QNN as a gray box. We will put in an image, do some operations, and get out a *classification result* which tells us what the QNN thinks this image is. The twenty-thousand feet view of how this goes is something like this: # # 1. We put in an image in the form of pixels, i.e. an array of numbers. # 2. We multiply those pixel values (numbers) with some other numbers, which are the neural network weights, add them together, and perform some other simple operations. # 4. We will repeat step 2 a couple of times with different weights. # 3. At the end, we will obtain an array of numbers, one number for each class that the QNN knows about. The class with the largest number is the QNN's best guess on what the image is, the second largest is the second best guess, and so on. # # We won't concern ourselves with *where* the weights come from -- this tutorial will simply provide you with several pre-trained QNNs for that purpose. If you'd like to know more about neural networks in general, [here](https://github.com/stephencwelch/Neural-Networks-Demystified) is a popular tutorial in Jupyter Notebook form with accompanying YouTube videos, alongside countless other resources on the Internet. # # ## OK, Let's Do It! # # We'll start with a classical example in neural networks: classifying 28x28 grayscale images of digits (0 to 9). Let's load an image and see what it looks like first. # + from PIL import Image from matplotlib.pyplot import imshow import numpy as np # load image using PIL img = Image.open("7.png") # convert to black and white img = img.convert("L") # convert to numpy array img = np.asarray(img) # display % matplotlib inline imshow(img, cmap='gray') # - # Looks like a seven to me, but to get a useful reminder of what images look like to a computer by default, let's have a look at the numpy array itself: print(img.shape) img # It's all just numbers in a 28x28 array! Now let's see what the neural network says about this data. We will start by loading the QNN from the file it is stored in which is a [Python Pickle](https://wiki.python.org/moin/UsingPickle). # + from QNN.layers import * import pickle qnn = pickle.load(open("mnist-w1a1.pickle", "rb")) qnn # - # As you can see, the QNN consists of several *layers*. The QNN we loaded seems to contain four types of layers: BipolarThresholding, FullyConnected, ScaleShift and Softmax. We will cover what all these do in more detail later on. Right now, let's just see if it works! The QNN module that we just imported contains a function called predict: # get the predictions array res = predict(qnn, img) # return the index of the largest prediction winner_ind = np.argmax(res) # the sum of the output values add up to 1 due to softmax, # so we can interpret them as probabilities winner_prob = 100 * res[winner_ind] print(res) print("The QNN predicts this is a %d with %f percent probability" % (winner_ind, winner_prob)) # And our first image classification with a QNN is a success! 
In the following section, we will take a closer look at the computation that is taking place inside the .execute() functions for this network, and later we will cover more advanced types of networks.
0-basics.ipynb
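The QNN tutorial above reads the network output as probabilities because the final softmax makes the scores non-negative and sum to one. A small numpy sketch with hypothetical scores shows that interpretation and the argmax step used to pick the winning class.

import numpy as np

# Hypothetical raw scores for the 10 digit classes, before softmax.
scores = np.array([1.2, -0.3, 0.5, 0.1, -1.0, 0.0, 0.7, 4.8, 0.2, 1.9])

# Numerically stable softmax: subtract the max before exponentiating.
exp = np.exp(scores - scores.max())
probs = exp / exp.sum()

print(probs.sum())                    # 1.0, so the entries read as probabilities
print(np.argmax(probs), probs.max())  # winning class and its confidence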
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + import napari from napari.utils import nbscreenshot import numpy as np from skimage import draw from morphometrics.utils.surface_utils import binary_mask_to_surface # - # create an image with an ellipsoid object label_image = np.zeros((100, 100, 100), dtype=np.uint16) label_image[19:82, 29:72, 29:72] = draw.ellipsoid(30, 20, 20) # mesh the ellipsoid mesh = binary_mask_to_surface( label_image, n_mesh_smoothing_interations=10 ) # + # create a napari viewer viewer = napari.Viewer(ndisplay=3) # add the image to the viewer viewer.add_image( label_image, rendering="iso", iso_threshold=0, visible=False ) # add the mesh to the viewer as a surface layer # see the napari docs for information on the surface layer # https://napari.org/howtos/layers/surface.html viewer.add_surface( (mesh.vertices, mesh.faces, np.ones((len(mesh.vertices),))) ) # - nbscreenshot(viewer)
examples/mesh_binary_mask.ipynb
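The mesh_binary_mask example above depends on morphometrics' binary_mask_to_surface, which performs surface extraction plus smoothing. As a hedged fallback when that package is unavailable, the same ellipsoid mask can be meshed (without smoothing) with scikit-image's marching_cubes; only the mask construction is taken from the notebook.

import numpy as np
from skimage import draw, measure

label_image = np.zeros((100, 100, 100), dtype=np.uint16)
label_image[19:82, 29:72, 29:72] = draw.ellipsoid(30, 20, 20)

# level=0.5 places the extracted surface between background (0) and foreground (1)
verts, faces, normals, values = measure.marching_cubes(label_image.astype(float), level=0.5)
print(verts.shape, faces.shape)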
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Data Gathering for Power Analysis # # We've previously done power analyses for the Growth Team based on registration data from 2018. In this case, we want an updated analysis with more wikis, in order to understand how expanding to additional wikis will impact our statistical power. # # This work is tracked in [T250120](https://phabricator.wikimedia.org/T250120). # # The task asks to compare four groups of wikis: # # * Original target wikis: Czech, Korean, Arabic, Vietnamese # * Current set: Czech, Korean, Arabic, Vietnamese, Ukrainian, Hungarian, Armenian, Basque # * Adding just French: Czech, Korean, Arabic, Vietnamese, Ukrainian, Hungarian, Armenian, Basque, French # * Adding our next set: Czech, Korean, Arabic, Vietnamese, Ukrainian, Hungarian, Armenian, Basque, French, Polish, Persian, Swedish, Danish, Indonesian, Italian, Portuguese. # # The first part of this is to gather registration, activation, and retention numbers for those wikis, for both desktop and mobile registrations. # + import datetime as dt import pandas as pd import numpy as np from wmfdata import hive # + ## Configuration variables ## Original target, then the next four, then the additional set wikis = ['cswiki', 'kowiki', 'viwiki', 'arwiki', 'ukwiki', 'huwiki', 'hywiki', 'euwiki', 'frwiki', 'plwiki', 'fawiki', 'svwiki', 'dawiki', 'idwiki', 'itwiki', 'ptwiki'] ## Activity tends to follow a yearly cycle, so let's use that. start_date = '2019-01-01' end_date = '2020-01-01' ## The mediawiki_history snapshot that we'll be using snapshot = '2020-03' # - activity_query = ''' WITH regs AS ( SELECT wiki_db, event_user_id, date_format(event_user_creation_timestamp, "yyyy-MM-01") as reg_month FROM wmf.mediawiki_history WHERE snapshot = "{snapshot}" AND event_entity = "user" AND event_type = "create" AND event_user_is_created_by_self = TRUE AND size(event_user_is_bot_by_historical) = 0 AND wiki_db IN ({wiki_list}) AND event_user_creation_timestamp >= "{start_time}" AND event_user_creation_timestamp < "{end_time}" ), mobile_data AS ( SELECT wiki AS wiki_db, event.userid AS user_id, IF(event.displaymobile, 'mobile', 'desktop') AS platform FROM event_sanitized.serversideaccountcreation WHERE year = 2019 AND wiki IN ({wiki_list}) AND event.isselfmade = true ), edits AS ( SELECT wiki_db, event_user_id, SUM(IF(unix_timestamp(event_timestamp) - unix_timestamp(event_user_creation_timestamp) < 86400 , 1, 0)) AS activation_edits, SUM(IF(unix_timestamp(event_timestamp) - unix_timestamp(event_user_creation_timestamp) BETWEEN 86400 AND 15*86400, 1, 0)) AS retention_edits FROM wmf.mediawiki_history WHERE snapshot = "{snapshot}" AND event_entity = "revision" AND event_type = "create" AND wiki_db IN ({wiki_list}) AND event_user_creation_timestamp >= "{start_time}" AND event_user_creation_timestamp < "{end_time}" AND SIZE(event_user_is_bot_by_historical) = 0 GROUP BY wiki_db, event_user_id ) SELECT regs.wiki_db, regs.event_user_id AS user_id, regs.reg_month, mobile_data.platform, coalesce(edits.activation_edits, 0) AS activation_edits, coalesce(edits.retention_edits, 0) AS retention_edits FROM regs JOIN mobile_data ON regs.wiki_db = mobile_data.wiki_db AND regs.event_user_id = mobile_data.user_id LEFT JOIN edits ON regs.wiki_db = edits.wiki_db AND regs.event_user_id = edits.event_user_id ''' # Grab user activity data: 
user_activity = hive.run(activity_query.format( snapshot = snapshot, wiki_list = ','.join('"{}"'.format(w) for w in wikis), start_time = start_date, end_time = end_date )) # Add boolean flags for whether a user is activated or retained: user_activity['is_activated'] = user_activity['activation_edits'] > 0 user_activity['is_retained'] = user_activity['is_activated'] & (user_activity['retention_edits'] > 0) # Aggregate per wiki, platform, and month of registration counts of registrations, activations, and retentions: registrations_agg = (user_activity.groupby(['wiki_db', 'reg_month', 'platform']) .agg({'user_id' : 'count'}) .rename(columns = {'user_id' : 'n_registered'})) activations_agg = (user_activity.loc[user_activity['is_activated'] == True] .groupby(['wiki_db', 'reg_month', 'platform']) .agg({'user_id' : 'count'}) .rename(columns = {'user_id' : 'n_activated'})) retentions_agg = (user_activity.loc[user_activity['is_retained'] == True] .groupby(['wiki_db', 'reg_month', 'platform']) .agg({'user_id' : 'count'}) .rename(columns = {'user_id' : 'n_retained'})) # Merge the three aggregations to combine: full_data = (registrations_agg.merge(activations_agg, how = 'left', left_index = True, right_index = True) .merge(retentions_agg, how = 'left', left_index = True, right_index = True) .fillna(0).reset_index()) # + ## Calculate activation and retention proportions full_data['prop_activated'] = full_data['n_activated'] / full_data['n_registered'] full_data['prop_retained'] = full_data['n_retained'] / full_data['n_activated'] # - # Aggregate over the whole year and calculate monthly averages: fullyear_agg = (full_data.groupby(['wiki_db', 'platform']) .agg({'n_registered' : 'mean', 'n_activated' : 'mean', 'n_retained' : 'mean', 'prop_activated' : 'mean', 'prop_retained' : 'mean'}) .reset_index()) # Write the resulting dataframe out as a TSV for import into R. fullyear_agg.to_csv('datasets/aggregate_statistics.tsv', header = True, index = False, sep = '\t')
01_data_gathering.ipynb
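The groupby/merge chain in the data-gathering notebook above is easiest to sanity-check on a tiny synthetic frame. The sketch below reproduces the same registration/activation/retention aggregation with pandas named aggregation; all values are made up.

import pandas as pd

toy = pd.DataFrame({
    "wiki_db":      ["cswiki"] * 4 + ["kowiki"] * 2,
    "platform":     ["desktop", "desktop", "mobile", "mobile", "desktop", "desktop"],
    "reg_month":    ["2019-01-01"] * 6,
    "is_activated": [True, False, True, True, True, False],
    "is_retained":  [True, False, False, True, False, False],
})

agg = (toy.groupby(["wiki_db", "platform", "reg_month"])
          .agg(n_registered=("is_activated", "size"),
               n_activated=("is_activated", "sum"),
               n_retained=("is_retained", "sum")))
agg["prop_activated"] = agg["n_activated"] / agg["n_registered"]
print(agg)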
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### 3. SciPy (Linear Programming) and Application (Drones) # #### SciPy # # SciPy (pronounced “Sigh Pie”) is open-source software for mathematics, science, and engineering. # Topics: https://docs.scipy.org/doc/scipy/reference/ # See Example at: https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linprog.html#scipy.optimize.linprog # <img src="files/scipyproblem.jpg" alt="Drawing" style="width: 500px;"/> # <span style="font-family:Comic Sans MS"><center>From docs.scipy.org</center></span> # # + from scipy.optimize import linprog c = [-1, 4] A = [[-3, 1], [1, 2]] b = [6, 4] x0_bounds = (None, None) x1_bounds = (-3, None) res = linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds]) res # - # Time for some inspiration: https://youtu.be/w2itwFJCgFQ?start=378&end=676 # # + from IPython.display import HTML # Youtube HTML('<iframe width="960" height="560" src="https://www.youtube.com/embed/w2itwFJCgFQ?start=378&amp;end=676" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>') # - # ### 4. Graph Theory (NetworkX) and Wrap-up # #### Graph Theory (NetworkX) # # NetworkX is a Python package for the creation, manipulation, and study of the structure, dynamics, and functions of complex networks. The audience for NetworkX includes mathematicians, physicists, biologists, computer scientists, and social scientists. Graph Theory -> Trajectory planning # Reference: https://networkx.org/documentation/stable/reference/index.html # MST:https://networkx.org/documentation/stable/reference/algorithms/generated/networkx.algorithms.tree.mst.minimum_spanning_tree.html#networkx.algorithms.tree.mst.minimum_spanning_tree # Gallery: https://networkx.org/documentation/stable/auto_examples/index.html # + #import ipyvolume as ipv #Imports a library called ipyvolume to help with plotting #import numpy as np #Library import for array creation, and manipulation import networkx as nx G = nx.Graph() G # + G.add_node(1) G.add_edge(3, 4) G.add_node('E') print(G.nodes) print(G.edges) print(G.adj[3]) # or list(G.neighbors(1)) # + import matplotlib.pyplot as plt plt.figure() nx.draw(G, with_labels=True) plt.show() # + #import ipyvolume as ipv #Imports a library called ipyvolume to help with plotting #import numpy as np #Library import for array creation, and manipulation import networkx as nx import matplotlib.pyplot as plt plt.figure() G = nx.petersen_graph() plt.subplot(121) nx.draw(G, with_labels=True, font_weight='bold') plt.subplot(122) nx.draw_shell(G, nlist=[range(5, 10), range(5)], with_labels=True, font_weight='bold') plt.show() # - # #### Transshipment Problem (From Class) # # <img src="files/Transshipment problem.jpg" alt="Drawing" style="width: 600px;"/> # # + G = nx.DiGraph() G.add_nodes_from(["Residue","Boston","Newark","Columbus","Atlanta","Richmond","Mobile","Jacksonville"]) G.add_weighted_edges_from([ ("Newark", "Residue", 100000), ("Jacksonville", "Residue", 100000), ("Newark", "Boston", 30), ("Boston", "Columbus", 50), ("Newark", "Richmond", 40), ("Jacksonville", "Richmond", 50), ("Jacksonville", "Mobile", 50), ("Jacksonville", "Atlanta", 45), ("Atlanta", "Columbus", 40), ("Columbus", "Atlanta", 35), ("Atlanta", "Mobile", 35), ("Mobile", "Atlanta", 25), ("Atlanta", "Richmond", 30) ], weight = "cost") 
G.node["Boston"]["Demand"] = 100 G.node["Columbus"]["Demand"] = 60 G.node["Atlanta"]["Demand"] = 170 G.node["Richmond"]["Demand"] = 80 G.node["Mobile"]["Demand"] = 70 G.node["Newark"]["Demand"] = -200 G.node["Jacksonville"]["Demand"] = -300 G.node["Residue"]["Demand"] = 20 plt.figure() pos = nx.spring_layout(G) nx.draw_networkx_nodes(G, pos, with_labels=False) nx.draw_networkx_edges(G, pos, width=2) for p in pos: # raise text positions pos[p][1] += 0.09 nx.draw_networkx_labels(G, pos) plt.show() # - # <img src="files/Transshipment problem.jpg" alt="Drawing" style="width: 600px;"/> # # Solve using the network simplex algorithm from the NetworkX package/library # Reference: https://networkx.org/documentation/networkx-1.11/reference/generated/networkx.algorithms.flow.network_simplex.html flowCost, flowDict = nx.network_simplex(G, weight='cost', demand="Demand") # print(flowCost - 20*100000) flowDict # <span style="font-family:Comic Sans MS">->>> Chinese Postman Problem -->>> </span> # Start Here: https://www.datacamp.com/community/tutorials/networkx-python-graph-tutorial # Then Here:http://brooksandrew.github.io/simpleblog/articles/sleeping-giant-rural-postman-problem
Presentation - Optimization with Python P2.ipynb
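Two notes on the transshipment notebook above: G.node was removed in NetworkX 2.4 in favour of G.nodes, and the demands passed to network_simplex must sum to zero (which the "Residue" node takes care of). Below is a minimal, self-contained min-cost-flow sketch with the current attribute API, using a hypothetical two-node network.

import networkx as nx

G = nx.DiGraph()
G.add_node("Plant", Demand=-10)   # negative demand = supply
G.add_node("Store", Demand=10)    # positive demand = requirement
G.add_edge("Plant", "Store", cost=4)

flow_cost, flow_dict = nx.network_simplex(G, demand="Demand", weight="cost")
print(flow_cost)   # 40: ten units shipped at cost 4 each
print(flow_dict)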
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/keithvtls/Numerical-Method-Activities/blob/main/Midterm/58015_CurveFitting_Yon.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="D8TsR0yQrx8U" # ## Members: # # <NAME> © 2021 # # <NAME> © 2021 # # <NAME> © 2021 # # <NAME> © 2021 # + id="sKHIwUQFacN-" import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.model_selection import train_test_split from sklearn import datasets from sklearn.linear_model import LinearRegression from sklearn import metrics from sklearn import linear_model from sklearn.metrics import mean_squared_error, r2_score # + [markdown] id="8JTx5N92BaRV" # ### Problem: # # Determining people who have diabetes that is high sugar level with high blood pressure. # + id="qHvT9TJEGmf_" diabetes_data = datasets.load_diabetes() # + colab={"base_uri": "https://localhost:8080/"} id="3wRUdDZMZYEL" outputId="1aea853c-3d85-44d2-db07-06ab07f3dbef" diabetes_data.keys() # + colab={"base_uri": "https://localhost:8080/", "height": 378} id="F2n7Fd7WZgyB" outputId="c826afb2-5419-46f8-b4d7-394baf372c85" diabetes = pd.DataFrame(diabetes_data.data, columns=diabetes_data.feature_names) diabetes.describe() # + colab={"base_uri": "https://localhost:8080/"} id="rkOx6hwfZxm9" outputId="b714a776-d37f-41dd-f778-940c03c94c6e" print(diabetes_data.DESCR) # + id="zQNP7bx0qv0w" X = diabetes['bp'].values.reshape(-1,1) y = diabetes['s6'].values.reshape(-1,1) # + colab={"base_uri": "https://localhost:8080/"} id="JxYoZ7zmxIYE" outputId="49a9762c-8291-4e4c-c6b7-404e81ab8d0c" regr = linear_model.LinearRegression() regr.fit(X, y) print('w0:', regr.intercept_) print('w1:', regr.coef_) # + [markdown] id="hNaSkcF6yiAT" # ### Multiple Linear Regression # + id="pcV1yzNzt14n" X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=1) # + id="wojIW6JhabZg" colab={"base_uri": "https://localhost:8080/"} outputId="5d19569b-5fba-45f3-960f-93530fa29369" model = LinearRegression() model.fit(X_train, y_train) # + colab={"base_uri": "https://localhost:8080/", "height": 108} id="PcghDCB0r_hj" outputId="efa1d041-1c22-4405-aff4-70bcdc11cb36" model_summary = pd.DataFrame(['bp'], columns=['Features']) model_summary['Weights Raw'] = model.coef_ model_summary = model_summary.append({'Features':'Intercept', 'Weights Raw':float(model.intercept_)}, ignore_index=True) model_summary # + id="aaWNV91osFnt" preds = model.predict(X_test) out = pd.DataFrame({'Actual': y_test.flatten(), 'Predicted': preds.flatten()}) # + colab={"base_uri": "https://localhost:8080/", "height": 426} id="LMvwbpgesOH_" outputId="05df1adb-2585-4eb0-def3-dc99bf0f0359" plt.figure(figsize=(5*2,3*2)) plt.title('Predictions', fontsize=30) plt.scatter(y_test, preds, s = 256, alpha=0.4) plt.xlabel('Ground Truth', fontsize=20) plt.ylabel('Prediction', fontsize=20) plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 374} id="9qjWP31_ufe2" outputId="a62b3e71-bf7a-4e93-a6ed-128dd2319680" plt.figure(figsize=(5*2,3*2)) plt.scatter(X_test, y_test, s = 256, color='blue', alpha=0.5) plt.plot(X_test, preds, color='red', linewidth=2) plt.show() # + [markdown] id="dy-OkP6avZv5" # ### Normal Equation # 
+ id="L2l5-qIqvV0d" n = len(diabetes['bp']) #no. rows x_bias =np.ones((n,1)) #column-1 of Matrix X x_new = np.reshape(X,(n,1)) #reshaping the data x_new =np.append(x_bias,x_new,axis=1) #forming Matrix X x_new_transpose = np.transpose(x_new) #transpose x_new_transpose_dot_x_new = x_new_transpose.dot(x_new) #matrix multiplication temp_1 = np.linalg.inv(x_new_transpose_dot_x_new) #inverse of a matrix temp_2 = x_new_transpose.dot(y) # + colab={"base_uri": "https://localhost:8080/"} id="SQAgZewPvfyk" outputId="66819d1e-dedf-41a9-f67a-233edd19bd2b" #Finding coefficients: theta = temp_1.dot(temp_2) Intercept = theta[0] Slope = theta[1] print("Intercept:",Intercept) print("Slope:",Slope) # + id="O9pGcfnTvjV4" #Predicting values: def predict_value(input_feature,slope,intercept): return slope*input_feature+intercept # + colab={"base_uri": "https://localhost:8080/"} id="kzFLaLKvvmmN" outputId="b90d7f3c-75c1-4d81-cb08-87e48a78588b" bmi =3 prediction =predict_value(bmi,Slope,Intercept) print(prediction) # + colab={"base_uri": "https://localhost:8080/", "height": 405} id="u51-Bp0hvodU" outputId="9d5d1794-11b1-466e-d58d-e0e34913aae7" #Plotting the regression Line: plt.figure(figsize=(5*2,3*2)) plt.scatter(X,y, alpha=0.5) plt.xlabel('bp') plt.ylabel('s6') plt.plot(X,Slope*X+Intercept, color="red") # + [markdown] id="Bzny9JvGxnOq" # ### Polynomial curve # + colab={"base_uri": "https://localhost:8080/"} id="X7LFV-aTyXK2" outputId="89aaae73-f51e-47aa-e38c-2bed29db3356" def linear_regressor(X,y): X = np.array(X) y = np.array(y) n = X.size w0 = (y.mean()*np.sum(X**2)-X.mean()*np.sum(X*y)) / (np.sum(X**2) - n*X.mean()**2) w1 = (np.sum(X*y) - X.mean()*np.sum(y)) / (np.sum(X**2) - n*X.mean()**2) return w0,w1 w0,w1 = linear_regressor(X,y) print("Linear Regression Equation: y = {:.3f}x + {:.3f}".format(w1, w0)) # + colab={"base_uri": "https://localhost:8080/", "height": 320} id="dQ6HQkgNyYvI" outputId="de924c77-d16f-4ffa-9dc4-9d40d9209fa9" def show_regline(X,y,w1,w0): x_min, x_max = X.min() - 1, X.max() + 1 linex = np.linspace(x_min, x_max) liney = w1*linex+w0 plt.figure(figsize=(5,5)) plt.grid() plt.scatter(X,y, alpha=0.5) plt.plot(linex, liney, c='red') plt.show() show_regline(X,y,w1,w0) # + colab={"base_uri": "https://localhost:8080/", "height": 337} id="acE6HIAbyifm" outputId="cce37b86-5273-441a-c99a-1e2c5d640159" def lin_reg(val,w0,w1): return w1*val + w0 #model print(lin_reg(10, w0, w1)) X_new, y_new = X.copy(), y.copy() for i in range(10,16): X_new = np.insert(X_new,-1, i) y_new = np.insert(y_new,-1, lin_reg(i,w0,w1)) show_regline(X_new, y_new, w1, w0) # + id="3kTp8KQ72Ag2" X = diabetes['bp'].values.reshape(-1,1) y = diabetes['s6'].values.reshape(-1,1) # + id="LVStugs1x_a_" X_1 = np.arange(0, 20,1) y_1 = X_1 - 2 * (X_1 ** 2) + 0.5 * (X_1 ** 3) + np.random.normal(-3, 3, 20) # + id="NzLqAfnCtXHC" def show_regline(X,y,w1,w0): x_min, x_max = X.min() - 1, X.max() + 1 linex = np.linspace(x_min, x_max) liney = w1*linex+w0 plt.figure(figsize=(5,5)) plt.grid() plt.scatter(X_1,y_1, s = 256, color='blue', alpha=0.5) plt.plot(linex, liney, c='red') plt.show() # + id="XTn_2GnDzRgk" def linear_regressor(X,y): X = np.array(X) y = np.array(y) n = X.size w0 = (y.mean()*np.sum(X**2)-X.mean()*np.sum(X*y)) / (np.sum(X**2) - n*X.mean()**2) w1 = (np.sum(X*y) - X.mean()*np.sum(y)) / (np.sum(X**2) - n*X.mean()**2) return w0,w1 w0,w1 = linear_regressor(X,y) # + colab={"base_uri": "https://localhost:8080/", "height": 320} id="4STHgQ5Zz6_i" outputId="43cf2f33-d7e2-4ef9-9d2b-8a4687ce072c" w0_q,w1_q = linear_regressor(X_1, y_1) 
show_regline(X_1,y_1,w0_q,w1_q) # + [markdown] id="wkenTVL4IjQ5" # ### References: # # [1] <NAME>, "Curve Fitting," in Numerical Method, 2021: [**Curve Fitting Techniques**](https://github.com/dyjdlopez/numeth2021/blob/main/Week%209-13%20-%20Curve%20Fitting%20Techniques/NuMeth_4_Curve_Fitting.ipynb) # # [2] <NAME>, "Applied Linear Regression," in Numerical Method, 2021: [**Linear Regression**](https://github.com/dyjdlopez/numeth2021/blob/main/Week%209-13%20-%20Curve%20Fitting%20Techniques/NuMeth_4_5_Applied_Linear_Regression.ipynb) # # [3] <NAME> (2020): [**<NAME>: Implementation of Simple Linear Regression Using Normal Equation (Matrices)**](https://medium.com/@shuklapratik22/implementation-of-simple-linear-regression-using-normal-equation-matrices-f9021c3590da)
Midterm/58015_CurveFitting_Yon.ipynb
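In the curve-fitting notebook above, the normal-equation cell computes theta = (X^T X)^-1 X^T y by hand, and the "Polynomial curve" cells generate cubic data but still fit a straight line to it. A hedged cross-check with NumPy's built-in solvers, on synthetic data generated the same way as X_1 and y_1:

import numpy as np

rng = np.random.default_rng(0)
x = np.arange(0, 20, 1.0)
y = x - 2 * x**2 + 0.5 * x**3 + rng.normal(-3, 3, x.size)

# Straight-line fit via least squares, equivalent to the normal equation above.
X = np.column_stack([np.ones_like(x), x])
coef, *_ = np.linalg.lstsq(X, y, rcond=None)
print("intercept, slope:", coef)

# An actual degree-3 polynomial fit, which matches how the data was generated.
print("cubic coefficients (highest power first):", np.polyfit(x, y, deg=3))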
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Lab 04 : Train vanilla neural network -- demo # # # # Training a one-layer net on MNIST # For Google Colaboratory import sys, os if 'google.colab' in sys.modules: # mount google drive from google.colab import drive drive.mount('/content/gdrive') path_to_file = '/content/gdrive/My Drive/CS4243_codes/codes/labs_lecture03/lab04_train_vanilla_nn' print(path_to_file) # move to Google Drive directory os.chdir(path_to_file) # !pwd import torch import torch.nn as nn import torch.optim as optim from random import randint import utils # ### randint(a,b) returns a random integer between a and b: # + idx=randint(5,10) # generate an integer randomly from 5 to 10 included. print(idx) # - # ### Download the TRAINING SET (data+labels) from utils import check_mnist_dataset_exists data_path=check_mnist_dataset_exists() # + train_data=torch.load(data_path+'mnist/train_data.pt') print(train_data.size()) # + train_label=torch.load(data_path+'mnist/train_label.pt') print(train_label.size()) # - # ### Download the TEST SET (data only) # + test_data=torch.load(data_path+'mnist/test_data.pt') print(test_data.size()) # - # ### Make a one layer net class class one_layer_net(nn.Module): def __init__(self, input_size, output_size): super(one_layer_net , self).__init__() self.linear_layer = nn.Linear( input_size, output_size , bias=False) def forward(self, x): x = self.linear_layer(x) p = torch.softmax(x, dim=1) return p # ### Build the net net=one_layer_net(784,10) print(net) # ### Take the 6th image of the test set: im=test_data[6] utils.show(im) # ### And feed it to the UNTRAINED network: p = net( im.view(1,784) ) print(p) # ### Display visually the confidence scores utils.show_prob_mnist(p) # ### Train the network (only 5000 iterations) on the train set # + criterion = nn.NLLLoss() optimizer=torch.optim.SGD(net.parameters() , lr=0.01 ) for iter in range(1,5000): # choose a random integer between 0 and 59,999 # extract the corresponding picture and label # and reshape them to fit the network idx=randint(0, 60000-1) input=train_data[idx].view(1,784) label=train_label[idx].view(1) # feed the input to the net input.requires_grad_() prob=net(input) # update the weights (all the magic happens here -- we will discuss it later) log_prob=torch.log(prob) loss = criterion(log_prob, label) optimizer.zero_grad() loss.backward() optimizer.step() # - # ### Take the 6th image of the test set: im=test_data[6] utils.show(im) # ### Feed it to the TRAINED net: prob = net( im.view(1,784)) print(prob) # ### Display visually the confidence scores utils.show_prob_mnist(prob) # ### Choose image at random from the test set and see how good/bad are the predictions # + # choose a picture at random idx=randint(0, 10000-1) im=test_data[idx] # diplay the picture utils.show(im) # feed it to the net and display the confidence scores prob = net( im.view(1,784)) utils.show_prob_mnist(prob) # -
codes/labs_lecture03/lab04_train_vanilla_nn/train_vanilla_nn_demo.ipynb
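The training loop in the demo above combines log(softmax(x)) with NLLLoss, which is the cross-entropy criterion written out explicitly. A data-free sketch of a single optimisation step on a random tensor shows the same mechanics without downloading MNIST; the shapes mirror the one-layer net (784 inputs, 10 classes).

import torch
import torch.nn as nn

net = nn.Linear(784, 10, bias=False)
criterion = nn.NLLLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=0.01)

x = torch.randn(1, 784)      # stand-in for one flattened 28x28 image
label = torch.tensor([3])    # stand-in class label

prob = torch.softmax(net(x), dim=1)
loss = criterion(torch.log(prob), label)

optimizer.zero_grad()
loss.backward()
optimizer.step()
print(loss.item())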
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import base58 import binascii wif='<KEY>' wif first_encode = base58.b58decode(wif) first_encode private_key_full = binascii.hexlify(first_encode) private_key_full private_key = private_key_full[2:-8] private_key.decode()
notebooks/WIF_TO_PRIVATE_KEY.ipynb
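The WIF notebook above strips the version byte and checksum with a fixed slice ([2:-8] on the hex string) and never validates the checksum. Below is a hedged sketch of the full Base58Check decode, using only hashlib and the base58 package already imported there; the compressed-key branch is an assumption about WIFs that carry the extra 0x01 suffix.

import hashlib
import base58

def wif_to_private_key(wif):
    raw = base58.b58decode(wif)
    payload, checksum = raw[:-4], raw[-4:]
    # Base58Check: the checksum is the first 4 bytes of SHA256(SHA256(payload)).
    if hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4] != checksum:
        raise ValueError("bad WIF checksum")
    body = payload[1:]                       # drop the 0x80 mainnet version byte
    if len(body) == 33 and body[-1] == 1:    # compressed-key WIF has a trailing 0x01
        body = body[:-1]
    return body.hex()

# wif_to_private_key(wif)  # would run against the (redacted) key loaded above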
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + import matplotlib.pyplot as plt from sklearn.decomposition import PCA from sklearn.datasets import load_iris data = load_iris() y = data.target X = data.data pca = PCA(n_components=2) reduced_X = pca.fit_transform(X) red_x, red_y = [], [] blue_x, blue_y = [], [] green_x, green_y = [], [] for i in range(len(reduced_X)): if y[i] == 0: red_x.append(reduced_X[i][0]) red_y.append(reduced_X[i][1]) elif y[i] == 1: blue_x.append(reduced_X[i][0]) blue_y.append(reduced_X[i][1]) else: green_x.append(reduced_X[i][0]) green_y.append(reduced_X[i][1]) plt.scatter(red_x, red_y, c='r', marker='x') plt.scatter(blue_x, blue_y, c='b', marker='D') plt.scatter(green_x, green_y, c='g', marker='.') plt.show() # -
chapter14/ed2-ch14-s2.ipynb
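The PCA example above builds three colour lists by hand. A hedged, more idiomatic version colours a single scatter call with the class labels and also prints explained_variance_ratio_, which shows how much of the iris variance the two components retain.

import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.datasets import load_iris

data = load_iris()
pca = PCA(n_components=2)
reduced = pca.fit_transform(data.data)

plt.scatter(reduced[:, 0], reduced[:, 1], c=data.target, cmap="viridis", s=20)
plt.colorbar(label="class")
plt.show()

print(pca.explained_variance_ratio_)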
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/GabrielLourenco12/python_algoritmo_de_busca_arad_bucarest/blob/main/Vetor_ordenado.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="3L3hMZxPloS9" # # Vetor ordenado # + id="ilKOzbksUosx" import numpy as np # + id="EI9-ginDUr7z" class VetorOrdenado: def __init__(self, capacidade): self.capacidade = capacidade self.ultima_posicao = -1 self.valores = np.empty(self.capacidade, dtype=int) # O(n) def imprime(self): if self.ultima_posicao == -1: print('O vetor está vazio') else: for i in range(self.ultima_posicao + 1): print(i, ' - ', self.valores[i]) # O(n) def insere(self, valor): if self.ultima_posicao == self.capacidade - 1: print('Capacidade máxima atingida') return posicao = 0 for i in range(self.ultima_posicao + 1): posicao = i if self.valores[i] > valor: break if i == self.ultima_posicao: posicao = i + 1 x = self.ultima_posicao while x >= posicao: self.valores[x + 1] = self.valores[x] x -= 1 self.valores[posicao] = valor self.ultima_posicao += 1 # O(n) def pesquisar(self, valor): for i in range(self.ultima_posicao + 1): if self.valores[i] > valor: return -1 if self.valores[i] == valor: return i if i == self.ultima_posicao: return -1 # O(log n) def pesquisa_binaria(self, valor): limite_inferior = 0 limite_superior = self.ultima_posicao while True: posicao_atual = int((limite_inferior + limite_superior) / 2) # Se achou na primeira tentativa if self.valores[posicao_atual] == valor: return posicao_atual # Se não achou elif limite_inferior > limite_superior: return -1 # Divide os limites else: # Limite inferior if self.valores[posicao_atual] < valor: limite_inferior = posicao_atual + 1 # Limite superior else: limite_superior = posicao_atual - 1 # O(n) def excluir(self, valor): posicao = self.pesquisar(valor) if posicao == -1: return -1 else: for i in range(posicao, self.ultima_posicao): self.valores[i] = self.valores[i + 1] self.ultima_posicao -= 1 # + id="stsNs_K8nn4r" outputId="7e014aef-abdd-4367-bd06-7763b40dbbf4" colab={"base_uri": "https://localhost:8080/", "height": 34} vetor = VetorOrdenado(10) vetor.imprime() # + id="6OQR86hMnr74" outputId="e42e6576-4dff-4f9d-f339-425016627422" colab={"base_uri": "https://localhost:8080/", "height": 34} vetor.insere(6) vetor.imprime() # + id="2SD9iJjrnzpB" outputId="0d032c4b-cc9d-4343-ef60-30c0d422bc8f" colab={"base_uri": "https://localhost:8080/", "height": 101} vetor.insere(4) vetor.imprime() # + id="UZiJuluyn4iq" outputId="cc1c46ea-93c9-4c0d-c1b9-83d44f91a5ea" colab={"base_uri": "https://localhost:8080/", "height": 84} vetor.insere(3) vetor.imprime() # + id="-EtyqP3ln--6" outputId="76939d01-98f5-4924-c86d-5cad13cdac96" colab={"base_uri": "https://localhost:8080/", "height": 118} vetor.insere(5) vetor.imprime() # + id="Ij44GQWRoE8-" outputId="305546d2-64aa-46bd-8ed4-7e8f7af609dd" colab={"base_uri": "https://localhost:8080/", "height": 134} vetor.insere(1) vetor.imprime() # + id="PHQuMp8aoKX3" outputId="ac01a013-78e5-401d-a90b-f305f9df3bd7" colab={"base_uri": "https://localhost:8080/", "height": 151} vetor.insere(8) vetor.imprime() # + id="u4RWmfsgpoUu" outputId="904e278c-9778-479d-e97e-ef7a51b3d328" colab={"base_uri": "https://localhost:8080/", "height": 162} 
vetor.pesquisar(3) # + id="JN_G7sRHp6ZD" outputId="c56f21be-8f67-4b93-f6fa-d8257fdf562e" colab={"base_uri": "https://localhost:8080/", "height": 34} vetor.pesquisar(2) # + id="g7Qy3DOpqIEQ" outputId="e663a88f-79f1-4008-c962-fc4007244310" colab={"base_uri": "https://localhost:8080/", "height": 34} vetor.pesquisar(9) # + id="TIyNYvRprA6i" outputId="d526e721-93c0-4fb2-b59d-6c9f13d1d114" colab={"base_uri": "https://localhost:8080/", "height": 151} vetor.imprime() # + id="890RvJJZrC-N" outputId="2ddd3b46-5fe0-41dd-e207-ae7f8f910400" colab={"base_uri": "https://localhost:8080/", "height": 134} vetor.excluir(4) vetor.imprime() # + id="tJZZj3dcrINv" outputId="0b4e702d-773a-436a-f96c-9afef3018c68" colab={"base_uri": "https://localhost:8080/", "height": 118} vetor.excluir(1) vetor.imprime() # + id="XIshZYD7rOrC" outputId="58ac36ab-8d11-45c3-9988-c328741dfd9d" colab={"base_uri": "https://localhost:8080/", "height": 101} vetor.excluir(8) vetor.imprime() # + id="yK-W-NZ_rTBv" outputId="01c48b43-3e58-4942-dc21-4fb14f92a0d0" colab={"base_uri": "https://localhost:8080/", "height": 34} vetor.excluir(6) # + id="VhiG4jtDZmsl" outputId="09b3d9ad-55c4-4326-c156-bf75b9e9a541" colab={"base_uri": "https://localhost:8080/", "height": 168} vetor = VetorOrdenado(10) vetor.insere(8) vetor.insere(9) vetor.insere(4) vetor.insere(1) vetor.insere(5) vetor.insere(7) vetor.insere(11) vetor.insere(13) vetor.insere(2) vetor.imprime() # + id="ThZslAO2Z01A" outputId="e1d09aa6-24ef-4d61-dd95-ad762c4f46b8" colab={"base_uri": "https://localhost:8080/", "height": 34} vetor.pesquisa_binaria(7) # + id="H7O2aeQCZ5dU" outputId="4e22ca31-91f1-416f-f373-80d16574d70a" colab={"base_uri": "https://localhost:8080/", "height": 34} vetor.pesquisa_binaria(5) # + id="v-fqvDvkZ-ln" outputId="774a089f-4a84-473d-a6a6-c0efec87f13c" colab={"base_uri": "https://localhost:8080/", "height": 34} vetor.pesquisa_binaria(13) # + id="0weYsscRaBeK" outputId="6268eafd-49fa-4713-8d34-0eb577dba4d7" colab={"base_uri": "https://localhost:8080/", "height": 34} vetor.pesquisa_binaria(20)
Vetor_ordenado.ipynb
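The Vetor_ordenado notebook above (written in Portuguese) implements an ordered array with O(n) insertion (insere), O(n) linear search (pesquisar) and O(log n) binary search (pesquisa_binaria). The standard library's bisect module gives the same behaviour; a minimal sketch for comparison, using the same values inserted in the notebook:

import bisect

values = []
for v in (8, 9, 4, 1, 5, 7, 11, 13, 2):
    bisect.insort(values, v)                     # keep the list sorted on insert, like insere()

def binary_search(sorted_list, value):
    i = bisect.bisect_left(sorted_list, value)   # O(log n), like pesquisa_binaria()
    return i if i < len(sorted_list) and sorted_list[i] == value else -1

print(values)                                                 # [1, 2, 4, 5, 7, 8, 9, 11, 13]
print(binary_search(values, 7), binary_search(values, 20))    # found index, then -1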
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="lCx9-Tf21c0_" # ## **Install the required packages and dependencies** # + id="JkPXhdd31Zgk" #check cuda version # !nvcc --version # install pytorch according to the cuda version from the pytorch website # !pip install torch==1.7.0+cu101 torchvision==0.8.1+cu101 torchaudio==0.7.0 -f https://download.pytorch.org/whl/torch_stable.html # install mmcv # !pip install mmcv-full==1.1.4 #clone github repository # !git clone https://github.com/rubeea/focal_phi_loss_mmsegmentation.git #install all requirements # !pip install -qr focal_phi_loss_mmsegmentation/requirements.txt # %cd /content/focal_phi_loss_mmsegmentation #install mmsegmentation in develop mode # !python setup.py develop # + [markdown] id="FZhbqjmm8IIw" # # **Make data directories and upload the data in them** # + id="sn1Qu9g0kJap" #declare global variables root_dir= '/content/focal_phi_loss_mmsegmentation/' data_root= "/content/focal_phi_loss_mmsegmentation/data/" dataset= "mendeley" # + id="aQcS320P22zn" colab={"base_uri": "https://localhost:8080/"} outputId="3a479c02-254b-42d9-bfe6-b9ddfda3ec80" import os #script to extract data from zip files and create the directory structure for mendeley # !python /content/focal_phi_loss_mmsegmentation/tools/convert_datasets/mendeley.py /content/focal_phi_loss_mmsegmentation/train_imgs.zip /content/focal_phi_loss_mmsegmentation/train_gt.zip /content/focal_phi_loss_mmsegmentation/val_imgs.zip /content/focal_phi_loss_mmsegmentation/val_gt.zip #check file counts train_imgs= data_root + dataset+'/img_dir/train' val_imgs= data_root + dataset +'/img_dir/val' train_gt= data_root + dataset+'/ann_dir/train' val_gt= data_root + dataset+'/ann_dir/val' #print the file count print(len(os.listdir(train_imgs))) print(len(os.listdir(val_imgs))) print(len(os.listdir(train_gt))) print(len(os.listdir(val_gt))) # + [markdown] id="CL9RLxEPabM_" # # **Visualize training and mask images** # + id="ZayX5trmabNC" import mmcv import matplotlib.pyplot as plt from google.colab.patches import cv2_imshow import numpy as np import os import glob PATH_TO_TRAIN_IMAGES_DIR= data_root+dataset+"/img_dir/train" PATH_TO_LABEL_IMAGES_DIR= data_root+dataset+"/ann_dir/train" TRAIN_IMAGE_PATHS = glob.glob(os.path.join(PATH_TO_TRAIN_IMAGES_DIR, "*.jpg")) LABEL_PATHS= glob.glob(os.path.join(PATH_TO_LABEL_IMAGES_DIR, "*.png")) TRAIN_IMAGE_PATHS.sort() LABEL_PATHS.sort() palette = [[120, 120, 120], [6, 230, 230]] #dataset palette display_num = 5 r_choices = np.random.choice(len(TRAIN_IMAGE_PATHS), display_num) #visualize any 5 random images and their mask images plt.figure(figsize=(10, 15)) for i in range(0, display_num * 2, 2): img_num = r_choices[i // 2] img = mmcv.imread(TRAIN_IMAGE_PATHS[img_num]) label = mmcv.imread(LABEL_PATHS[img_num]) plt.subplot(display_num, 2, i + 1) plt.imshow(img) plt.title("Input image") plt.subplot(display_num, 2, i + 2) plt.imshow(label) plt.title("Ground Truth") plt.suptitle("Examples of Images and their Masks") plt.show() # + [markdown] id="yUVtmn3Iq3WA" # #**Create a config file** # In the next step, we need to modify the config for the training. To accelerate the process, we finetune the model from trained weights. 
# + id="Wwnj9tRzqX_A" #config for ACU-Net from mmcv import Config cfg = Config.fromfile('/content/focal_phi_loss_mmsegmentation/configs/unet/fcn_unet_s5-d16_256x256_40k_hrf.py') # + [markdown] id="1y2oV5w97jQo" # Since the given config is used to train fcn U-Net on hrf dataset, we need to modify it accordingly for our new dataset. # + id="eyKnYC1Z7iCV" from mmseg.apis import set_random_seed import torch cfg_data_root = data_root+dataset train_img_dir = 'img_dir/train' train_ann_dir = 'ann_dir/train' val_img_dir = 'img_dir/val' val_ann_dir = 'ann_dir/val' # Since we use ony one GPU, BN is used instead of SyncBN cfg.norm_cfg = dict(type='BN', requires_grad=True) cfg.model.backbone.norm_cfg = cfg.norm_cfg cfg.model.decode_head.norm_cfg = cfg.norm_cfg cfg.model.auxiliary_head.norm_cfg = cfg.norm_cfg # modify num classes of the model in decode/auxiliary head cfg.model.decode_head.num_classes = 2 cfg.model.auxiliary_head.num_classes = 2 # Modify dataset type and path cfg.dataset_type = 'MendeleyDataset' cfg.data_root = cfg_data_root #batch size cfg.data.samples_per_gpu = 2 cfg.data.workers_per_gpu=2 #Balanced CE loss #assign class weights to tackle the imbalanced nature of the dataset in balanced CE loss # weights = [0.02, 0.98] # cfg.model.decode_head.loss_decode=dict(type='CrossEntropyLoss', use_sigmoid=False, # loss_weight=1.0, class_weight= weights) # cfg.model.auxiliary_head.loss_decode=dict(type='CrossEntropyLoss', use_sigmoid=False, # loss_weight=0.4, class_weight= weights) #Dice Loss # cfg.model.decode_head.loss_decode=dict(type='TverskyLoss', alpha=0.5, beta=0.5, gamma=1.0, use_focal=False, loss_weight=1.0) # cfg.model.auxiliary_head.loss_decode=dict(type='TverskyLoss', alpha=0.5, beta=0.5, gamma=1.0, use_focal=False, loss_weight=0.4) #Tversky Loss # cfg.model.decode_head.loss_decode=dict(type='TverskyLoss', alpha=0.3, beta=0.7, gamma=1.0, use_focal=False, loss_weight=1.0) # cfg.model.auxiliary_head.loss_decode=dict(type='TverskyLoss', alpha=0.3, beta=0.7, gamma=1.0, use_focal=False, loss_weight=0.4) #Focal Tversky Loss # cfg.model.decode_head.loss_decode=dict(type='TverskyLoss', alpha=0.3, beta=0.7, gamma=0.75, use_focal=True, loss_weight=1.0) # cfg.model.auxiliary_head.loss_decode=dict(type='TverskyLoss', alpha=0.3, beta=0.7, gamma=0.75, use_focal=True, loss_weight=0.4) #MCC loss # cfg.model.decode_head.loss_decode=dict(type='PhiLoss', loss_weight=1.0, gamma=1.0) # cfg.model.auxiliary_head.loss_decode=dict(type='PhiLoss', loss_weight=0.4, gamma=1.0) #Focal Phi Loss cfg.model.decode_head.loss_decode=dict(type='PhiLoss', loss_weight=1.0, gamma=0.5) cfg.model.auxiliary_head.loss_decode=dict(type='PhiLoss', loss_weight=0.4, gamma=0.5) #normalizing the dataset cfg.img_norm_cfg = dict( mean= [142.393, 137.978, 83.073], std= [23.228, 20.046, 21.623], to_rgb=True) #for mendeley dataset cfg.crop_size = (256, 256) #dataset config cfg.train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations'), dict(type='Resize', img_scale=(512, 512), ratio_range=(0.5, 1.5)), #for mendeley dataset dict(type='RandomRotate', prob=0.5, degree=(45.0,315.0),pad_val=0,seg_pad_val=255), #for pldu and mendeley dataset dict(type='RandomCrop', crop_size=cfg.crop_size, cat_max_ratio=0.75), dict(type='RandomFlip', flip_ratio=0.5), dict(type='PhotoMetricDistortion'), dict(type='Normalize', **cfg.img_norm_cfg), dict(type='Pad', size=cfg.crop_size, pad_val=0, seg_pad_val=255), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_semantic_seg']), ] cfg.test_pipeline = [ 
dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale= (512, 512), #for mendeley dataset flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **cfg.img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] cfg.data.train.type = cfg.dataset_type cfg.data.train.data_root = cfg.data_root cfg.data.train.img_dir = train_img_dir cfg.data.train.ann_dir = train_ann_dir cfg.data.train.pipeline = cfg.train_pipeline cfg.data.train.split = None cfg.data.val.type = cfg.dataset_type cfg.data.val.data_root = cfg.data_root cfg.data.val.img_dir = val_img_dir cfg.data.val.ann_dir = val_ann_dir cfg.data.val.pipeline = cfg.test_pipeline cfg.data.val.split = None cfg.data.test.type = cfg.dataset_type cfg.data.test.data_root = cfg.data_root cfg.data.test.img_dir = val_img_dir cfg.data.test.ann_dir = val_ann_dir cfg.data.test.pipeline = cfg.test_pipeline cfg.data.test.split = None # Set up working dir to save files and logs. cfg.work_dir = root_dir+'/work_dirs/tutorial' cfg.total_iters = 160 cfg.log_config.interval = 50 cfg.evaluation.interval = 200 #validation at every 200 iterations cfg.checkpoint_config.interval = 200 # Set seed to facilitate reproducing the result cfg.seed = 0 set_random_seed(0, deterministic=False) cfg.gpu_ids = range(1) #for Dice Loss, Tversky Loss and Focal Tversky Loss on Mendeley dataset use the following learning rates with Adam: # lr = 5e-5 cfg.optimizer = dict(type='Adam', lr=1e-3, weight_decay=0.0001, paramwise_cfg = dict( custom_keys={ 'head': dict(lr_mult=10.) } )) # Let's have a look at the final config used for training print(f'Config:\n{cfg.pretty_text}') # + [markdown] id="YXvx9Ll9EvNS" # # **Train the model according to the config** # + id="jYKoSfdMF12B" from mmseg.datasets import build_dataset from mmseg.models import build_segmentor from mmseg.apis import train_segmentor from mmseg.utils import collect_env, get_root_logger import mmcv import os.path as osp # Build the dataset datasets = [build_dataset(cfg.data.train)] meta = dict() # log env info env_info_dict = collect_env() env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()]) meta['env_info'] = env_info meta['seed'] = cfg.seed meta['exp_name'] = osp.basename(cfg.filename) cfg.checkpoint_config.meta = dict( CLASSES=datasets[0].CLASSES, PALETTE=datasets[0].PALETTE) # Build the detector model = build_segmentor( cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg) # Add an attribute for visualization convenience model.CLASSES = datasets[0].CLASSES # Create work_dir mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir)) train_segmentor(model, datasets, cfg, distributed=False, validate=True, meta=meta) # + [markdown] id="yFKytoqKGp92" # # **Inference with Trained Model** # + id="KwPcnvRXeiVo" #create direcrtory for storing results # !mkdir /content/focal_phi_loss_mmsegmentation/work_dirs/results # !mkdir /content/focal_phi_loss_mmsegmentation/work_dirs/results/masks # !mkdir /content/focal_phi_loss_mmsegmentation/work_dirs/results/overlays # + id="P1lhqp-aIfln" from mmseg.apis import inference_segmentor, init_segmentor, show_result_pyplot import mmcv import matplotlib.pyplot as plt from google.colab.patches import cv2_imshow import numpy as np import os import glob import zipfile PATH_TO_TEST_IMAGES_DIR= data_root+dataset+"/img_dir/val" PATH_TO_LABEL_IMAGES_DIR= data_root+dataset+"/ann_dir/val" TEST_IMAGE_PATHS = glob.glob(os.path.join(PATH_TO_TEST_IMAGES_DIR, "*.jpg")) LABEL_PATHS= 
glob.glob(os.path.join(PATH_TO_LABEL_IMAGES_DIR, "*.png")) TEST_IMAGE_PATHS.sort() LABEL_PATHS.sort() checkpoint= root_dir+'/work_dirs/tutorial/iter_200.pth' #checkpoint path model = init_segmentor(cfg, checkpoint, device='cuda:0') palette = [[120, 120, 120], [6, 230, 230]] #dataset palette print(len(TEST_IMAGE_PATHS)) for i in range(0, len(TEST_IMAGE_PATHS)): img = mmcv.imread(TEST_IMAGE_PATHS[i]) label = mmcv.imread(LABEL_PATHS[i]) result = inference_segmentor(model, img) img_name= LABEL_PATHS[i].split("/")[7] #FOR MASK # img_name= TEST_IMAGE_PATHS[i].split("/")[7] #for overlay #prediction mask arr= np.array(result) arr=np.squeeze(arr) #overlay image # overlay = model.show_result(img, result, palette=palette, show=False) # plt.subplot(1, 7 , 1) # plt.imshow(img) # plt.title("Input image") # plt.subplot(1, 7, 3) # plt.imshow(label) # plt.title("Ground Truth") # plt.subplot(1, 7, 5) plt.imshow(arr) plt.title("Prediction") # plt.subplot(1, 7, 7) # plt.imshow(mmcv.bgr2rgb(overlay)) # plt.title("Overlay") # show_result_pyplot(model, img, result,palette) image_file_name= root_dir+"/work_dirs/results/masks/"+img_name plt.savefig(image_file_name) print("saving "+img_name) i= i+1 # plt.figure(figsize=(8, 6)) # show_result_pyplot(model, img, result,palette) # + [markdown] id="PCvbTJmBdNyB" # # **Downloading Inference Results** # + id="4nOek-cqitGi" from google.colab import files #mask results # !zip -r /content/focal_phi_loss_mmsegmentation/work_dirs/results/masks /content/focal_phi_loss_mmsegmentation/work_dirs/results/masks files.download("/content/focal_phi_loss_mmsegmentation/work_dirs/results/masks.zip") # + id="80ZSKSCenKFS" #overlay results # !zip -r /content/focal_phi_loss_mmsegmentation/work_dirs/overlays /content/focal_phi_loss_mmsegmentation/work_dirs/overlays files.download("/content/focal_phi_loss_mmsegmentation/work_dirs/overlays.zip")
focal_phi_loss_mendeley_acunet.ipynb
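The config above swaps the decode and auxiliary heads' losses for a PhiLoss with a gamma exponent, i.e. a Matthews-correlation-coefficient based objective. The sketch below is not the repository's implementation, only a generic, differentiable soft-MCC loss for binary masks written from the standard MCC formula, to illustrate the idea.

import torch

def soft_phi_loss(probs, target, gamma=0.5, eps=1e-7):
    # probs: predicted foreground probabilities; target: binary mask of the same shape
    tp = (probs * target).sum()
    tn = ((1 - probs) * (1 - target)).sum()
    fp = (probs * (1 - target)).sum()
    fn = ((1 - probs) * target).sum()
    mcc = (tp * tn - fp * fn) / torch.sqrt(
        (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn) + eps)
    # loss shrinks as MCC approaches 1; gamma < 1 emphasises hard examples
    return (1 - mcc).clamp(min=0) ** gamma

probs = torch.rand(2, 64, 64, requires_grad=True)
target = (torch.rand(2, 64, 64) > 0.9).float()
loss = soft_phi_loss(probs, target)
loss.backward()
print(loss.item())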
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import warnings # `do not disturbe` mode warnings.filterwarnings('ignore') import numpy as np # vectors and matrices import pandas as pd # tables and data manipulations import matplotlib.pyplot as plt # plots import seaborn as sns # more plots from dateutil.relativedelta import relativedelta # working with dates with style from scipy.optimize import minimize # for function minimization import statsmodels.formula.api as smf # statistics and econometrics import statsmodels.tsa.api as smt import statsmodels.api as sm import scipy.stats as scs from itertools import product # some useful functions from tqdm import tqdm_notebook # %matplotlib inline # - url = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Confirmed.csv' confirmed = pd.read_csv(url, error_bad_lines=False) url = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Deaths.csv' death = pd.read_csv(url, error_bad_lines=False) url = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Recovered.csv' recover = pd.read_csv(url, error_bad_lines=False) # fix region names confirmed['Country/Region']= confirmed['Country/Region'].str.replace("Bd", "Bangladesh") confirmed['Country/Region']= confirmed['Country/Region'].str.replace("US", "United States") death['Country/Region']= death['Country/Region'].str.replace("Bd", "Bangladesh") death['Country/Region']= death['Country/Region'].str.replace("US", "United States") recover['Country/Region']= recover['Country/Region'].str.replace("Bd", "Bangladesh") recover['Country/Region']= recover['Country/Region'].str.replace("US", "United States") confirmed.head(92) confirmed.columns[ :] confirmed.shape[:] confirmed.info() confirmed.describe() confirmed.describe(include=['object', 'bool']) confirmed['3/20/20'].value_counts() confirmed['3/20/20'].value_counts(normalize=True) confirmed.sort_values(by='3/20/20', ascending=False).head() confirmed.sort_values(by=['Country/Region','3/20/20'], ascending=[False,True]).head() confirmed['3/20/20'].mean() confirmed[confirmed['3/20/20'] == 1].mean() confirmed.loc[0:5] confirmed.iloc[0:5, 55:] confirmed[:-1] confirmed[-1:] # some imports to set up plotting import matplotlib.pyplot as plt # pip install seaborn import seaborn as sns # Graphics in retina format are more sharp and legible # %config InlineBackend.figure_format = 'retina' sns.countplot(x='Country/Region', hue='3/20/20', data=confirmed) features = ['Country/Region', '3/20/20'] confirmed[features].hist(figsize=(10, 4)) df = confirmed df[features].plot(kind='density', subplots=True, layout=(1, 2), sharex=False, figsize=(10, 4)) sns.distplot(df['3/20/20']) sns.boxplot(x='3/20/20', data=df) _, axes = plt.subplots(1, 2, sharey=True, figsize=(6, 4)) sns.boxplot(data=df['3/20/20'], ax=axes[0]) sns.violinplot(data=df['3/20/20'], ax=axes[1]) df[features].describe() _, axes = plt.subplots(nrows=1, ncols=2, figsize=(12, 4)) sns.countplot(x='3/20/20', data=df, ax=axes[0]); sns.countplot(x='Country/Region', data=df, ax=axes[1]); numerical = list(set(df.columns) - set(['Province/State', 'Country/Region', 'Lat', 'Long'])) # Calculate and plot corr_matrix = 
df[numerical].corr() sns.heatmap(corr_matrix) plt.scatter(df['3/19/20'], df['3/20/20']) plt.scatter(df['Country/Region'], df['3/20/20']) sns.jointplot(x='3/1/20', y='3/20/20', data=df, kind='scatter') sns.jointplot(x='3/1/20', y='3/20/20', data=df, kind='hex') # `pairplot()` may become very slow with the SVG or retina format # %config InlineBackend.figure_format = 'png' sns.pairplot(df[numerical]); # %config InlineBackend.figure_format = 'retina' sns.lmplot('3/1/20', '3/20/20', data=df, hue='Country/Region', fit_reg=False)
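# The wide, one-column-per-date layout used above makes trend plots awkward. As an optional follow-up sketch (not part of the original analysis, and assuming the `confirmed` frame and JHU column names loaded earlier), the frame can be reshaped so that dates form the index:
# +
# Collapse provinces into one row per country, then transpose so dates become the index.
ts = (confirmed
      .drop(columns=['Province/State', 'Lat', 'Long'])
      .groupby('Country/Region')
      .sum()
      .T)
ts.index = pd.to_datetime(ts.index)

# Plot the five countries with the most confirmed cases as of the latest date.
top5 = ts.iloc[-1].sort_values(ascending=False).head(5).index
ts[top5].plot(figsize=(10, 5))
plt.ylabel('Cumulative confirmed cases');
# -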
src/forecast.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/mherbert93/DS-Unit-2-Linear-Models/blob/master/module3-ridge-regression/LS_DS_213_assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="fq9wNXC4OJiI" colab_type="text" # Lambda School Data Science # # *Unit 2, Sprint 1, Module 3* # # --- # + [markdown] colab_type="text" id="7IXUfiQ2UKj6" # # Ridge Regression # # ## Assignment # # We're going back to our other **New York City** real estate dataset. Instead of predicting apartment rents, you'll predict property sales prices. # # But not just for condos in Tribeca... # # - [x] Use a subset of the data where `BUILDING_CLASS_CATEGORY` == `'01 ONE FAMILY DWELLINGS'` and the sale price was more than 100 thousand and less than 2 million. # - [x] Do train/test split. Use data from January — March 2019 to train. Use data from April 2019 to test. # - [x] Do one-hot encoding of categorical features. # - [x] Do feature selection with `SelectKBest`. # - [x] Fit a ridge regression model with multiple features. Use the `normalize=True` parameter (or do [feature scaling](https://scikit-learn.org/stable/modules/preprocessing.html) beforehand — use the scaler's `fit_transform` method with the train set, and the scaler's `transform` method with the test set) # - [x] Get mean absolute error for the test set. # - [x] As always, commit your notebook to your fork of the GitHub repo. # # The [NYC Department of Finance](https://www1.nyc.gov/site/finance/taxes/property-rolling-sales-data.page) has a glossary of property sales terms and NYC Building Class Code Descriptions. The data comes from the [NYC OpenData](https://data.cityofnewyork.us/browse?q=NYC%20calendar%20sales) portal. # # # ## Stretch Goals # # Don't worry, you aren't expected to do all these stretch goals! These are just ideas to consider and choose from. # # - [ ] Add your own stretch goal(s) ! # - [ ] Instead of `Ridge`, try `LinearRegression`. Depending on how many features you select, your errors will probably blow up! 💥 # - [x] Instead of `Ridge`, try [`RidgeCV`](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RidgeCV.html). # - [ ] Learn more about feature selection: # - ["Permutation importance"](https://www.kaggle.com/dansbecker/permutation-importance) # - [scikit-learn's User Guide for Feature Selection](https://scikit-learn.org/stable/modules/feature_selection.html) # - [mlxtend](http://rasbt.github.io/mlxtend/) library # - scikit-learn-contrib libraries: [boruta_py](https://github.com/scikit-learn-contrib/boruta_py) & [stability-selection](https://github.com/scikit-learn-contrib/stability-selection) # - [_Feature Engineering and Selection_](http://www.feat.engineering/) by Kuhn & Johnson. # - [ ] Try [statsmodels](https://www.statsmodels.org/stable/index.html) if you’re interested in more inferential statistical approach to linear regression and feature selection, looking at p values and 95% confidence intervals for the coefficients. # - [ ] Read [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/ISLR%20Seventh%20Printing.pdf), Chapters 1-3, for more math & theory, but in an accessible, readable way. 
# - [ ] Try [scikit-learn pipelines](https://scikit-learn.org/stable/modules/compose.html). # + colab_type="code" id="o9eSnDYhUGD7" colab={} # %%capture import sys # If you're on Colab: if 'google.colab' in sys.modules: DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/' # !pip install category_encoders==2.* # If you're working locally: else: DATA_PATH = '../data/' # Ignore this Numpy warning when using Plotly Express: # FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead. import warnings warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy') # + colab_type="code" id="QJBD4ruICm1m" colab={} import pandas as pd import pandas_profiling # Read New York City property sales data df = pd.read_csv(DATA_PATH+'condos/NYC_Citywide_Rolling_Calendar_Sales.csv') # Change column names: replace spaces with underscores df.columns = [col.replace(' ', '_') for col in df] # SALE_PRICE was read as strings. # Remove symbols, convert to integer df['SALE_PRICE'] = ( df['SALE_PRICE'] .str.replace('$','') .str.replace('-','') .str.replace(',','') .astype(int) ) # + id="oIE711g7OJiW" colab_type="code" colab={} # BOROUGH is a numeric column, but arguably should be a categorical feature, # so convert it from a number to a string df['BOROUGH'] = df['BOROUGH'].astype(str) # + id="uEQe6U1HOJib" colab_type="code" colab={} # Reduce cardinality for NEIGHBORHOOD feature # Get a list of the top 10 neighborhoods top10 = df['NEIGHBORHOOD'].value_counts()[:10].index # At locations where the neighborhood is NOT in the top 10, # replace the neighborhood with 'OTHER' df.loc[~df['NEIGHBORHOOD'].isin(top10), 'NEIGHBORHOOD'] = 'OTHER' # + id="MuzpHnr2OJif" colab_type="code" outputId="fae47f41-9bca-49a9-9f50-ddaeecfe396f" colab={"base_uri": "https://localhost:8080/", "height": 469} df.head() # + id="4pbdYV3lOv-b" colab_type="code" colab={} df = df[(df['BUILDING_CLASS_CATEGORY'] == '01 ONE FAMILY DWELLINGS') & ((df['SALE_PRICE'] > 100000) & (df['SALE_PRICE'] < 2000000))] # + id="p3O3SINBPXHi" colab_type="code" outputId="729b9ce4-8ae0-42a3-ce47-6b27d7167245" colab={"base_uri": "https://localhost:8080/", "height": 135} df.head(1) # + id="Dr_wjaeCbYDm" colab_type="code" outputId="405af31e-4f9c-47d0-ad14-86f9d63703ae" colab={"base_uri": "https://localhost:8080/", "height": 421} df.isnull().sum() # + id="bydd57_lbdM0" colab_type="code" outputId="3352d00a-60f3-4998-8105-6926c77b8df3" colab={"base_uri": "https://localhost:8080/", "height": 35} df.shape # + id="NtWGnBrwbfje" colab_type="code" colab={} df.drop(['EASE-MENT', 'APARTMENT_NUMBER'], axis=1, inplace=True) #all values in this column are null, so drop them # + id="ttzSRL5YPdjt" colab_type="code" colab={} df['SALE_DATE'] = pd.to_datetime(df['SALE_DATE'], infer_datetime_format=True) #convert to datatime format train = df[(df['SALE_DATE'] >= '2019-01-01') & (df['SALE_DATE'] < '2019-04-01')] test = df[(df['SALE_DATE'] >= '2019-04-01') & (df['SALE_DATE'] < '2019-06-01')] # + id="bIOxRdwxT4SB" colab_type="code" outputId="770b02bb-8338-4e50-e806-da669d735864" colab={"base_uri": "https://localhost:8080/", "height": 238} train.describe(exclude='number') # + id="h-QOekLivpJO" colab_type="code" outputId="a343ea20-123e-4f67-f425-7e97b20f74d2" colab={"base_uri": "https://localhost:8080/", "height": 300} train.describe(include='number') # + id="b5fTLVPJUFGR" colab_type="code" colab={} #remove columns that have high cardinality train = train.drop(['ADDRESS', 
'LAND_SQUARE_FEET', 'SALE_DATE'], axis=1) test = test.drop(['ADDRESS', 'LAND_SQUARE_FEET', 'SALE_DATE'], axis=1) # + id="XMkS55OSXZUd" colab_type="code" colab={} target = 'SALE_PRICE' features = train.columns.drop([target]) # + id="ejs8eznaQxm5" colab_type="code" colab={} X_train = train[features] X_test = test[features] Y_train = train['SALE_PRICE'] Y_test = test['SALE_PRICE'] import category_encoders as ce encoder = ce.OneHotEncoder(use_cat_names=True) X_train = encoder.fit_transform(X_train) X_test = encoder.transform(X_test) # + id="obSfs3bKX-oK" colab_type="code" outputId="25fd2788-41d5-4bdc-c579-11f82c83025a" colab={"base_uri": "https://localhost:8080/", "height": 244} X_train.head() # + id="rcpB4rDuYhII" colab_type="code" outputId="6994fff7-ffd5-42ee-bce8-2e866472b989" colab={"base_uri": "https://localhost:8080/", "height": 35} X_train.shape # + id="Wc0wtU6FZpUs" colab_type="code" colab={} from sklearn.feature_selection import SelectKBest, f_regression selector = SelectKBest(score_func=f_regression, k=9) X_train_selected = selector.fit_transform(X_train, Y_train) X_test_selected = selector.transform(X_test) # + id="IS_3VU1hbIrC" colab_type="code" outputId="60a2d711-c907-45e2-ddfb-172a5cb2fa95" colab={"base_uri": "https://localhost:8080/", "height": 201} selected_mask = selector.get_support() all_names = X_train.columns selected_names = all_names[selected_mask] unselected_names = all_names[~selected_mask] print('Features selected:') for name in selected_names: print(name) # + id="BBMSxJ_zTGC5" colab_type="code" outputId="61c4734b-55eb-4fcd-d00c-03a8d653fcac" colab={"base_uri": "https://localhost:8080/", "height": 1000} import warnings warnings.filterwarnings("ignore") from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_absolute_error from sklearn.linear_model import RidgeCV previous_mae = 1000000000 previous_features = 100000000 diff = 100 alphas = [0.0001, 0.001, 0.01, 0.1, 1.0, 10.0, 100.0] for k in range(1, len(X_train.columns)+1): selector = SelectKBest(score_func=f_regression, k=k) X_train_selected = selector.fit_transform(X_train, Y_train) X_test_selected = selector.transform(X_test) ridge = RidgeCV(alphas=alphas, normalize=True, cv=10) ridge.fit(X_train_selected, Y_train) prediction = ridge.predict(X_test_selected) mae = mean_absolute_error(Y_test, prediction) print(f'Test Mean Absolute Error: ${mae:,.0f} \n') print("Alpha value is: ", ridge.alpha_) print(f'{k} features') selected_mask = selector.get_support() all_names = X_train.columns selected_names = all_names[selected_mask] unselected_names = all_names[~selected_mask] print('Features selected:') for name in selected_names: print(name) # + id="xBAnyOdPj_x3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="9d575cb0-9397-42fe-fc11-27981a1bc0c0" from sklearn.linear_model import Ridge selector = SelectKBest(score_func=f_regression, k=12) X_train_selected = selector.fit_transform(X_train, Y_train) X_test_selected = selector.transform(X_test) ridge = Ridge(alpha=0.001, normalize=True) #use alpha value as determined from above ridge.fit(X_train_selected, Y_train) prediction = ridge.predict(X_test_selected) mae = mean_absolute_error(Y_test, prediction) print(f'Test Mean Absolute Error: ${mae:,.0f} \n') # + id="r301sDMfkCAV" colab_type="code" colab={}
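# One of the stretch goals above suggests scikit-learn pipelines. Below is a minimal sketch of how the encoding, feature-selection and ridge steps could be chained into a single estimator; it assumes the `train`/`test` frames and the `features`/`target` names defined earlier, and since `normalize=True` was removed from `Ridge` in newer scikit-learn releases, a `StandardScaler` step stands in for it. Treat it as an illustration rather than the assignment's required solution.
# +
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_absolute_error
import category_encoders as ce

pipeline = make_pipeline(
    ce.OneHotEncoder(use_cat_names=True),          # same encoding step as above
    StandardScaler(),                              # stands in for normalize=True
    SelectKBest(score_func=f_regression, k=12),    # same k as the final model above
    Ridge(alpha=0.001)
)

pipeline.fit(train[features], train[target])
pipeline_mae = mean_absolute_error(test[target], pipeline.predict(test[features]))
print(f'Pipeline test mean absolute error: ${pipeline_mae:,.0f}')
# -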
module3-ridge-regression/LS_DS_213_assignment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: mirror1 # language: python # name: mirror1 # --- # # MLFlow Pre-packaged Model Server AB Test Deployment # In this example we will build two models with MLFlow and we will deploy them as an A/B test deployment. The reason this is powerful is because it allows you to deploy a new model next to the old one, distributing a percentage of traffic. These deployment strategies are quite simple using Seldon, and can be extended to shadow deployments, multi-armed-bandits, etc. # ## Tutorial Overview # # This tutorial will follow closely break down in the following sections: # # 1. Train the MLFlow elastic net wine example # # 2. Deploy your trained model leveraging our pre-packaged MLFlow model server # # 3. Test the deployed MLFlow model by sending requests # # 4. Deploy your second model as an A/B test # # 5. Visualise and monitor the performance of your models using Seldon Analytics # # It will follow closely our talk at the [Spark + AI Summit 2019 on Seldon and MLflow](https://www.youtube.com/watch?v=D6eSfd9w9eA). # + [markdown] toc-hr-collapsed=true toc-nb-collapsed=true # ## Dependencies # # For this example to work you must be running Seldon 0.3.2 or above - you can follow our [getting started guide for this](https://docs.seldon.io/projects/seldon-core/en/latest/workflow/install.html). # # In regards to other dependencies, make sure you have installed: # # * Helm v2.13.1+ # * kubectl v1.14+ # * Python 3.6+ # * MLFlow 1.1.0 # * pygmentize # # We will also take this chance to load the Python dependencies we will use through the tutorial: # - import pandas as pd import numpy as np from seldon_core.seldon_client import SeldonClient # + [markdown] toc-hr-collapsed=true toc-nb-collapsed=true # #### Let's get started! 🚀🔥 # + [markdown] toc-hr-collapsed=true toc-nb-collapsed=true # ## 1. Train the first MLFlow Elastic Net Wine example # # For our example, we will use the elastic net wine example from [MLflow's tutorial](https://www.mlflow.org/docs/latest/tutorial.html). # - # ### MLproject # # As any other MLflow project, it is defined by its `MLproject` file: # !pygmentize -l yaml MLproject # We can see that this project uses Conda for the environment and that it's defined in the `conda.yaml` file: # !pygmentize conda.yaml # Lastly, we can also see that the training will be performed by the `train.py` file, which receives two parameters `alpha` and `l1_ratio`: # !pygmentize train.py # ### Dataset # # We will use the wine quality dataset. # Let's load it to see what's inside: data = pd.read_csv("wine-quality.csv") data.head() # ### Training # # We've set up our MLflow project and our dataset is ready, so we are now good to start training. # MLflow allows us to train our model with the following command: # # ``` bash # $ mlflow run . -P alpha=... -P l1_ratio=... # ``` # # On each run, `mlflow` will set up the Conda environment defined by the `conda.yaml` file and will run the training commands defined in the `MLproject` file. # !mlflow run . -P alpha=0.5 -P l1_ratio=0.5 # Each of these commands will create a new run which can be visualised through the MLFlow dashboard as per the screenshot below. 
# # ![](images/mlflow-dashboard.png) # # Each of these models can actually be found in the `mlruns` folder: # !tree -L 1 mlruns/0 # ### MLmodel # # Inside each of these folders, MLflow stores the parameters we used to train our model, any metrics we logged during training, and a snapshot of our model. # If we look into one of them, we can see the following structure: # !tree mlruns/0/$(ls mlruns/0 | head -1) # In particular, we are interested in the `MLmodel` file stored under `artifacts/model`: # !pygmentize -l yaml mlruns/0/$(ls mlruns/0 | head -1)/artifacts/model/MLmodel # This file stores the details of how the model was stored. # With this information (plus the other files in the folder), we are able to load the model back. # Seldon's MLflow server will use this information to serve this model. # # Now we should upload our newly trained model into a public Google Cloud Storage or S3 bucket. # We have already done this to make it simpler, which you will be able to find at `gs://seldon-models/mlflow/model-a`. # + [markdown] toc-hr-collapsed=true toc-nb-collapsed=true # ## 2. Deploy your model using the Pre-packaged Model Server for MLFlow # # Once you have a Kubernetes cluster with [Seldon](https://docs.seldon.io/projects/seldon-core/en/latest/workflow/install.html) and [Ambassador](https://docs.seldon.io/projects/seldon-core/en/latest/workflow/install.html#install-ambassador) running, we can deploy our trained MLFlow model. # For this we have to create a Seldon definition of the model server, which we will break down further below. # # We will be using the model we uploaded to our Google bucket (gs://seldon-models/mlflow/elasticnet_wine), but you can use your own model if you uploaded it to a public bucket. # - # !pygmentize mlflow-model-server-seldon-config.yaml # Once we have written our configuration file, we are able to deploy it to our cluster by running the following command: # !kubectl apply -f mlflow-model-server-seldon-config.yaml # Once it's created we just wait until it's deployed. # # It will basically download the image for the pre-packaged MLFlow model server, and initialise it with the model we specified above. # # You can check the status of the deployment with the following command: # !kubectl rollout status deployment.apps/mlflow-deployment-mlflow-deployment-dag-77efeb1 # Once it's deployed, we should see a "successfully rolled out" message above. We can now test it! # + [markdown] toc-hr-collapsed=true toc-nb-collapsed=true # ## 3. Test the deployed MLFlow model by sending requests # Now that our model is deployed in Kubernetes, we are able to send it requests. # - # We will first need the URL that is currently available through Ambassador. # # If you are running this locally, you should be able to reach it through localhost; in this case we can use port 80. # !kubectl get svc | grep ambassador # Now we will select the first datapoint in our dataset to send to the model. 
x_0 = data.drop(["quality"], axis=1).values[:1] print(list(x_0[0])) # We can try sending a request first using curl: # !curl -X POST -H 'Content-Type: application/json' \ # -d "{'data': {'names': [], 'ndarray': [[7.0, 0.27, 0.36, 20.7, 0.045, 45.0, 170.0, 1.001, 3.0, 0.45, 8.8]]}}" \ # http://localhost:80/seldon/default/mlflow-deployment/api/v0.1/predictions # We can also send the request by using our python client # + from seldon_core.seldon_client import SeldonClient import math import numpy as np import subprocess HOST = "localhost" # Add the URL you found above port = "80" # Make sure you use the port above batch = x_0 payload_type = "ndarray" sc = SeldonClient( gateway="ambassador", gateway_endpoint=HOST + ":" + port) client_prediction = sc.predict( data=batch, deployment_name="mlflow-deployment", names=[], payload_type=payload_type) print(client_prediction.response) # + [markdown] toc-hr-collapsed=true toc-nb-collapsed=true # ## 4. Deploy your second model as an A/B test # # Now that we have a model in production, it's possible to deploy a second model as an A/B test. # Our model will also be an Elastic Net model but using a different set of parameters. # We can easily train it by leveraging MLflow: # - # !mlflow run . -P alpha=0.75 -P l1_ratio=0.2 # As we did before, we will now need to upload our model to a cloud bucket. # To speed things up, we already have done so and the second model is now accessible in `gs://seldon-models/mlflow/model-b`. # ### A/B test # # We will deploy our second model as an A/B test. # In particular, we will redirect 20% of the traffic to the new model. # # This can be done by simply adding a `traffic` attribute on our `SeldonDeployment` spec: # !pygmentize ab-test-mlflow-model-server-seldon-config.yaml # And similar to the model above, we only need to run the following to deploy it: # !kubectl apply -f ab-test-mlflow-model-server-seldon-config.yaml # We can check that the models have been deployed and are running with the following command. # # We should now see the "a-" model and the "b-" models. # !kubectl get pods # ## 5. Visualise and monitor the performance of your models using Seldon Analytics # # This section is optional, but by following the instructions you will be able to visualise the performance of both models as per the chart below. # # In order for this example to work you need to install and run the [Grafana Analytics package for Seldon Core](https://docs.seldon.io/projects/seldon-core/en/latest/analytics/analytics.html#helm-analytics-chart). # # For this we can access the URL with the command below, it will request an admin and password which by default are set to the following: # * Username: admin # * Password: <PASSWORD> # # You can access the grafana dashboard through the port provided below: # !kubectl get svc grafana-prom -o jsonpath='{.spec.ports[0].nodePort}' # Now that we have both models running in our Kubernetes cluster, we can analyse their performance using Seldon Core's integration with Prometheus and Grafana. # To do so, we will iterate over the training set (which can be found in `wine-quality.csv`), making a request and sending the feedback of the prediction. # # Since the `/feedback` endpoint requires a `reward` signal (i.e. 
the higher the better), we will simulate one as: # # $$ # R(x_{n}) # = \begin{cases} # \frac{1}{(y_{n} - f(x_{n}))^{2}} &, y_{n} \neq f(x_{n}) \\ # 500 &, y_{n} = f(x_{n}) # \end{cases} # $$ # # , where $R(x_{n})$ is the reward for input point $x_{n}$, $f(x_{n})$ is our trained model and $y_{n}$ is the actual value. # + def _get_reward(y, y_pred): if y == y_pred: return 500 return 1 / np.square(y - y_pred) def _test_row(row): input_features = row[:-1] feature_names = input_features.index.to_list() X = input_features.values.reshape(1, -1) y = row[-1].reshape(1, -1) # Note that we are re-using the SeldonClient defined previously r = sc.predict( deployment_name="mlflow-deployment", data=X, names=feature_names) y_pred = r.response.data.tensor.values reward = _get_reward(y, y_pred) sc.feedback( deployment_name="mlflow-deployment", prediction_request=r.request, prediction_response=r.response, reward=reward) return reward[0] data.apply(_test_row, axis=1) # - # You should now be able to see Seldon's pre-built Grafana dashboard. # ![](images/grafana-mlflow.jpg) # In bottom of the dashboard you can see the following charts: # # - On the left: the requests per second, which shows the different traffic breakdown we specified. # - On the center: the reward, where you can see how model `a` outperforms model `b` by a large margin. # - On the right, the latency for each one of them. # # You are able to add your own custom metrics, and try out other more complex deployments by following further guides at https://docs.seldon.io/projects/seldon-core/en/latest/workflow/README.html
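# As a quick offline sanity check of the reward shaping above (a small sketch that only needs the `_get_reward` helper defined earlier, no cluster required): the reward is the inverse squared error, capped at 500 when the prediction is exact.
# +
for y_true, y_pred in [(5.0, 5.0), (5.0, 5.5), (5.0, 6.0), (5.0, 8.0)]:
    print(y_true, y_pred, _get_reward(y_true, y_pred))
# -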
examples/models/mlflow_server_ab_test_ambassador/mlflow_server_ab_test_ambassador.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- import pandas as pd movies = pd.read_csv('movies.csv', index_col='Title') movies movies.shape movies.dtypes movies.loc['Incredibles 2'] movies['Gross'] = ( movies['Gross'] .str.replace('$', '', regex=False) .str.replace(',', '', regex=False) .astype(float) ) studios = movies.groupby('Studio') studios['Gross'].sum().sort_values(ascending=False).head()
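# A small optional extension of the groupby above (a sketch, assuming the `movies` frame and the cleaned `Gross` column from this notebook): `agg` collects several summaries per studio in a single pass.
# +
(movies
 .groupby('Studio')['Gross']
 .agg(['count', 'sum', 'mean'])
 .sort_values('sum', ascending=False)
 .head())
# -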
movies.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import matplotlib.pyplot as plt import equations as eq import sympy as sp from solvers import * # %matplotlib inline # - # To build the discrete operator # # \begin{equation} # A = a(x, y)\partial_x^2 + b(x, y)\partial_y^2 + \alpha(x, y)\partial_x + \beta(x, y)\partial_y + q(x, y) \partial_x\partial_y # \end{equation} # # with the exact solution # # \begin{equation} # u(x, y) = u_{\sf exact} # \end{equation} # # fill coefficients and exact solution in the following template def my_favourite_equation(L_x, L_y): x, y = sp.symbols('x, y', real=True) a = .0 b = .0 alpha = .0 beta = .0 q = .0 exact = .0 return eq.construct_equation(a, b, alpha, beta, q, exact, x, y, L_x, L_y) # Here $L_x, L_y$ define the physical space $x, y \in \left[0, L_x\right]\times\left[0, L_y\right]$. # As an example we consider the following equation # # \begin{equation} # \cosh(x y)\partial^2_x + \left[1 + \cos(\pi x^2 y)\right]^2 \partial_y^2 + \exp(x)\partial_x + \exp(y)\partial_y + (1-x)(1-y)\partial_x\partial_y, # \end{equation} # # \begin{equation} # u_{\sf exact} = x + y + 3x^3 # \end{equation} def equation_1633(L_x, L_y): x, y = sp.symbols('x, y', real=True) a = sp.cosh(x*y) b = (1 + sp.cos(sp.pi*x**2*y))**2 alpha = sp.exp(x) beta = sp.exp(y) q = (1-x)*(1-y) exact = x + y + 3*x**3 return eq.construct_equation(a, b, alpha, beta, q, exact, x, y, L_x, L_y) # When equation is defined we pass it to the function that construct ``coo_matrix`` # + J = 5 n_x = n_y = 2**5 h = 2**-5 A, rhs, exact = eq.construct_matrix(equation_1633, 'Dirichlet', 1, 1, n_x, n_y) # - # We convert ``coo_matrix`` to dense format of ``numpy`` A = A.toarray() # Now, we can run available solvers. # BiCGSTAB (scipy) solution, E_CG = BICGSTAB(A, rhs, tol=1e-15, verbose=True, write=True) np.linalg.norm(solution - exact) # Gauss-Seidel solution, E_GS = GS(A, rhs, tol=h**2, verbose=True, write=True) np.linalg.norm(solution - exact) # Belief propagation split solver (see the article for details) solution, E_split = split_BP_solver(A, rhs, tol=h**2, verbose=True, write=True) np.linalg.norm(solution - exact) solution_1, E_BP = GaBP(A, rhs, tol=h**2, verbose=True, write=True) np.linalg.norm(solution - exact) # For other available solvers see the notebook that reproduces figures from the article. # Now it is possible to access convergence history. For example for the split solver plt.plot(np.log(E_split)) # or for the Gaussian belief propagation solver plt.plot(np.log(E_BP))
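# To compare the solvers side by side, the convergence histories collected above can go on one log-scale plot. This is just a sketch and assumes the histories (`E_CG`, `E_GS`, `E_split`, `E_BP`) contain positive entries, as the log plots above already do.
# +
for name, history in [('BiCGSTAB', E_CG), ('Gauss-Seidel', E_GS),
                      ('split BP', E_split), ('GaBP', E_BP)]:
    plt.semilogy(history, label=name)
plt.xlabel('iteration')
plt.ylabel('convergence history')
plt.legend();
# -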
template for your equations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Lesson 3 Demo 1: 2 Queries 2 Tables # <img src="images/cassandralogo.png" width="250" height="250"> # ### In this demo we are going to walk through the basics of creating a table in Apache Cassandra, inserting rows of data, and doing a simple SQL query to validate the information. We will talk about the importance of Denormalization, and that 1 table per 1 query is an encouraged practice with Apache Cassandra. # #### We will use a python wrapper/ python driver called cassandra to run the Apache Cassandra queries. This library should be preinstalled but in the future to install this library you can run this command in a notebook to install locally: # # ! pip install cassandra-driver # #### More documentation can be found here: https://datastax.github.io/python-driver/ # #### Import Apache Cassandra python package import cassandra # ### First let's create a connection to the database from cassandra.cluster import Cluster try: cluster = Cluster(['127.0.0.1']) #If you have a locally installed Apache Cassandra instance session = cluster.connect() except Exception as e: print(e) # ### Let's create a keyspace to do our work in # + try: session.execute(""" CREATE KEYSPACE IF NOT EXISTS udacity WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }""" ) except Exception as e: print(e) # - # #### Connect to our Keyspace. Compare this to how we had to create a new session in PostgreSQL. try: session.set_keyspace('udacity') except Exception as e: print(e) # ### Let's imagine we would like to start creating a Music Library of albums. # # ### We want to ask 2 questions of our data # #### 1. Give me every album in my music library that was released in a given year # `select * from music_library WHERE YEAR=1970` # #### 2. Give me every album in my music library that was created by a given artist # `select * from artist_library WHERE artist_name="<NAME>"` # # ### Because I want to do two different quries, I am going to do need different tables that partition the data differently. # * My music library table will be by year that will become my partition key, and artist name will be my clustering column to make each Primary Key unique. # * My album library table will be by artist name that will be my partition key, and year will be my clustering column to make each Primary Key unique. More on Primary keys in the next lesson and demo. # # `Table Name: music_library # column 1: Year # column 2: Artist Name # column 3: Album Name # PRIMARY KEY(year, artist name)` # <img src="images/table1.png" width="350" height="350"> # # ` Table Name: album_library # column 1: Artist Name # column 2: Year # column 3: Album Name # PRIMARY KEY (artist name, year)` # <img src="images/table2.png" width="350" height="350"> # Let's create both tables. 
# + query = "CREATE TABLE IF NOT EXISTS music_library " query = query + "(year int, artist_name text, album_name text, PRIMARY KEY (year, artist_name))" try: session.execute(query) except Exception as e: print(e) query = "CREATE TABLE IF NOT EXISTS album_library " query = query + "(year int, artist_name text, album_name text, PRIMARY KEY (artist_name, year))" try: session.execute(query) except Exception as e: print(e) # - # ### Let's insert some data into both tables # + query = "INSERT INTO music_library (year, artist_name, album_name)" query = query + " VALUES (%s, %s, %s)" query1 = "INSERT INTO album_library (artist_name, year, album_name)" query1 = query1 + " VALUES (%s, %s, %s)" try: session.execute(query, (1970, "The Beatles", "Let it Be")) except Exception as e: print(e) try: session.execute(query, (1965, "The Beatles", "Rubber Soul")) except Exception as e: print(e) try: session.execute(query, (1965, "The Who", "My Generation")) except Exception as e: print(e) try: session.execute(query, (1966, "The Monkees", "The Monkees")) except Exception as e: print(e) try: session.execute(query, (1970, "The Carpenters", "Close To You")) except Exception as e: print(e) try: session.execute(query1, ("The Beatles", 1970, "Let it Be")) except Exception as e: print(e) try: session.execute(query1, ("The Beatles", 1965, "Rubber Soul")) except Exception as e: print(e) try: session.execute(query1, ("The Who", 1965, "My Generation")) except Exception as e: print(e) try: session.execute(query1, ("The Monkees", 1966, "The Monkees")) except Exception as e: print(e) try: session.execute(query1, ("The Carpenters", 1970, "Close To You")) except Exception as e: print(e) # - # ### This might have felt unnatural to insert duplicate data into two tables. If I just normalized these tables, I wouldn't have to have extra copies! While this is true, remember there are no `JOINS` in Apache Cassandra. For the benefit of high availibity and scalabity denormalization must be how this is done. # # ### Let's Validate our Data Model # # `select * from music_library WHERE YEAR=1970` # + query = "select * from music_library WHERE YEAR=1970" try: rows = session.execute(query) except Exception as e: print(e) for row in rows: print (row.year, row.artist_name, row.album_name,) # - # ### Let's Validate our Data Model # # `select * from album_library WHERE ARTIST_NAME = "The Beatles"` # + query = "select * from album_library WHERE ARTIST_NAME='The Beatles'" try: rows = session.execute(query) except Exception as e: print(e) for row in rows: print (row.artist_name, row.year, row.album_name) # - # ### For the sake of the demo, I will drop the table. # + query = "drop table music_library" try: rows = session.execute(query) except Exception as e: print(e) query = "drop table album_library" try: rows = session.execute(query) except Exception as e: print(e) # - # ### And Finally close the session and cluster connection session.shutdown() cluster.shutdown()
1_Data_Modeling/4_noSQL_Data_Modeling/Lesson 3 Demo 1 2 Queries 2 Tables.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Performance metrics # # In this notebook we return to the **supervised learning** algorithms we trained in the last notebook, but dive deeper into how to evaluate them. As we saw, **classification** and **regression** each have their own performance metrics. Classification itself can be sub-divided into **binary** and **multi-class** classification with a set of metrics for each. # The data and code-base in this notebook is very similar to the last notebook. However the exercises have changed to reflect the topic of this chapter. # # We use a bank marketing data, which has demographic and activity data about bank customers, as well as information about previous attempts to contact them for a marketing campain. The target `y` is binary and indicates whether the client signed up for a term deposit or not. Let's load the data again. You can read more about the data [here](https://archive.ics.uci.edu/ml/datasets/Bank+Marketing). # + import pandas as pd import numpy as np bank = pd.read_csv("data/bank-full.csv", sep = ";") bank.head() # - # Since numeric and categorical features are often pre-processed differently, we will create variables that store the names of each to make it easier to refer to them later. # + num_cols = bank.select_dtypes(['integer', 'float']).columns cat_cols = bank.select_dtypes(['object']).drop(columns = "y").columns print("Numeric columns are {}.".format(", ".join(num_cols))) print("Categorical columns are {}.".format(", ".join(cat_cols))) # - # As usual before we can proceed to machine learning, we need to get the data ready. And since we're doing supervised learning, we need to set aside a test data set to later be evaluate the model. So let's begin by splitting the data. # + from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(bank.drop(columns = "y"), bank["y"], test_size = 0.15, random_state = 42) # - X_train = X_train.reset_index(drop = True) X_test = X_test.reset_index(drop = True) print(f"Training data has {X_train.shape[0]} rows.") print(f"Test data has {X_test.shape[0]} rows.") # Before we begin our journey of trying out different algorithms in `sklearn` we do need to encode our categorical features. # + from sklearn.preprocessing import OneHotEncoder onehoter = OneHotEncoder(sparse = False, drop = "first") onehoter.fit(X_train[cat_cols]) onehot_cols = onehoter.get_feature_names(cat_cols) X_train_onehot = pd.DataFrame(onehoter.transform(X_train[cat_cols]), columns = onehot_cols) X_test_onehot = pd.DataFrame(onehoter.transform(X_test[cat_cols]), columns = onehot_cols) # - # Some algorithms we're going to use (such as decision tree) won't require that we normalize our numeric features, but most will. Not doing so won't break the algorithm, but just as we saw in the case of k-means, it will skew the results. So let's Z-normalize our numeric features now. 
# + from sklearn.preprocessing import StandardScaler znormalizer = StandardScaler() znormalizer.fit(X_train[num_cols]) X_train_norm = pd.DataFrame(znormalizer.transform(X_train[num_cols]), columns = num_cols) X_test_norm = pd.DataFrame(znormalizer.transform(X_test[num_cols]), columns = num_cols) X_train_norm.head() # - # We now join our numeric features and our one-hot-encoded categorical features into one data set that we pass to the decision tree classifier. # + X_train_featurized = X_train_onehot # add one-hot-encoded columns X_test_featurized = X_test_onehot # add one-hot-encoded columns X_train_featurized[num_cols] = X_train_norm # add numeric columns X_test_featurized[num_cols] = X_test_norm # add numeric columns del X_train_norm, X_test_norm, X_train_onehot, X_test_onehot print("Featurized training data has {} rows and {} columns.".format(*X_train_featurized.shape)) print("Featurized test data has {} rows and {} columns.".format(*X_test_featurized.shape)) # - # ## Decision tree classifier # # With our data ready, we can now train a decision tree classifier. There is a lot of detail that we leave for another time, but the common pattern to all the supervised learning algorithm is what we want to call attention to here: # # 1. We create an **instance** of the algorithm, along with any settings we want to use. Here we instantiate a `DecisionTreeClassifier` and specify `max_depth = 10`. # 1. We train the algorithm on the training data by calling the `fit` method. # 1. Once the model is trained, we obtain prediction by calling the `predict` method. # + from sklearn.tree import DecisionTreeClassifier dtree = DecisionTreeClassifier(max_depth = 5) dtree.fit(X_train_featurized, y_train) y_hat_train = dtree.predict(X_train_featurized) y_hat_test = dtree.predict(X_test_featurized) # - # If we wish to evaluate the model, we only need to predict for the test data, but in our case we predict both for the training and test data so we can show the effect of overfitting or underfitting. # + from sklearn.metrics import accuracy_score acc_train = accuracy_score(y_train, y_hat_train) * 100 acc_test = accuracy_score(y_test, y_hat_test) * 100 print("Accuracy on the training data: {:.0f}%.".format(acc_train)) print("Accuracy on the test data: {:.0f}%.".format(acc_test)) # - # ### Exercise # # - Find the counts of positive and negative classes for the target variable. # - Based on the counts, would you recommend looking using **accuracy** to measure the model's performance? # - Check out what other performance metrics are available in `sklearn.metrics` that could be relevant to evaluating this model. Choose one and report the value. # ### End of exercise # ## k-nearest neighbor classifier # # A model trained using the k-nearest neighbor algorithm on the other hand is very different. It doesn't have a tree structure. Instead it labels a new data point by finding the $k$ points nearest to it and looking up what their labels are, and letting the new data's label be the same as whatever label the majority of its neighbors have. Optionally, we can let closer neighbors influence the vote more than more distant neighbors. 
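# As an aside, the optional distance weighting mentioned above corresponds to the `weights` argument of scikit-learn's implementation. The cells below stick with the default uniform voting; this is just a quick sketch of the weighted variant.
# +
from sklearn.neighbors import KNeighborsClassifier

# 'distance' lets closer neighbors count more than distant ones in the vote
knnb_weighted = KNeighborsClassifier(n_neighbors=5, weights='distance')
knnb_weighted.fit(X_train_featurized, y_train)
acc_weighted = accuracy_score(y_test, knnb_weighted.predict(X_test_featurized)) * 100
print("Accuracy of the distance-weighted variant on the test data: {:.0f}%.".format(acc_weighted))
# -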
# + from sklearn.neighbors import KNeighborsClassifier knnb = KNeighborsClassifier() knnb.fit(X_train_featurized, y_train) y_hat_train = knnb.predict(X_train_featurized) y_hat_test = knnb.predict(X_test_featurized) # - # If we wish to evaluate the model, we only need to predict for the test data, but in our case we predict both for the training and test data so we can show the effect of overfitting or underfitting. # + acc_train = accuracy_score(y_train, y_hat_train) * 100 acc_test = accuracy_score(y_test, y_hat_test) * 100 print("Accuracy on the training data: {:.0f}%.".format(acc_train)) print("Accuracy on the test data: {:.0f}%.".format(acc_test)) # - # ### Exercise # # - Get **precision** and **recall** for the model we trained above. Note that by default, the corresponding functions in `sklearn.metrics` expect the positive label to be the integer 1. For us, the positive label is the string `yes`, so we need to use `pos_label = 'yes'`. # - Instead of calling the `predict` method to get predictions, call the `predict_proba` method to get the probability $P(Y_i = 1)$ for each row. # - Change your the threshold from 0.50 (default) to 0.75 and based on this new threshold obtain hard predictions from the soft predictions we got in the last step. # - Obtain **precision** and **recall** once more (now that we changed the threshold). # - How did increasing the threshold change precision and recall. # ### End of exercise # ## Logistic regression classifier # # The logistic regression algorithm is another popular classifier. Careful here: even though it has the word **regression** in it, logistic regression is a **classification** algorithm, not a **regression** algorithm. A model trained using logistic regression predicts new classes using an **equation**. This makes logistic regression very efficient. In fact, once you have your trained model, you can pull out the equation's **coefficients** and implement it even in SQL: in just one query, although if we have a lot of features it could be nasty query! # + from sklearn.linear_model import LogisticRegression logit = LogisticRegression(max_iter = 5000) logit.fit(X_train_featurized, y_train) y_hat_train = logit.predict(X_train_featurized) y_hat_test = logit.predict(X_test_featurized) # - # Let's look at precision and recall on the training and test data. # + from sklearn.metrics import precision_score, recall_score precision_train = precision_score(y_train, y_hat_train, pos_label = 'yes') * 100 precision_test = precision_score(y_test, y_hat_test, pos_label = 'yes') * 100 recall_train = recall_score(y_train, y_hat_train, pos_label = 'yes') * 100 recall_test = recall_score(y_test, y_hat_test, pos_label = 'yes') * 100 print("Precision = {:.0f}% and recall = {:.0f}% on the training data.".format(precision_train, recall_train)) print("Precision = {:.0f}% and recall = {:.0f}% on the test data.".format(precision_test, recall_test)) # - # ## SVM classifier # # It looks like so far logistic regression performs better than the other two models right off the bat. So it might be time to compare its performance against one of the more advanced algorithms. Let's train an SVM model. SVM stands for support vector machines and before neural networks and deep learning started making a comeback in the last few years, SVMs were considered state of the art. As you will notice from running the next line, SVMs are also very compute-heavy. 
# # Note that by default, SVMs are **hard classifiers**, but by specifying `probability = True` we can get it to return soft predictions. Unfortunately, this comes at an added computational cost. But we need the soft predictions later so we can plot an ROC curve. # + from sklearn.svm import SVC svmc = SVC(probability = True) svmc.fit(X_train_featurized, y_train) y_hat_train = svmc.predict(X_train_featurized) y_hat_test = svmc.predict(X_test_featurized) # - # ### Exercise # # Obtain precision and recall for the SVM classifier we trained above and compare them to the ones we got from training the logistic regression. What are your conclusions? # ### End of exercise # ## Comparing classifiers using the ROC curve and AUC # # So far we have trained four binary classification models. Having to look at precision and recall to determine which is best might be a little tedious, especially since in some cases there are trade-offs involved. Not to mention that the precision and recall metrics, as we saw in an earlier exercise, depend on a threshold. By default the threshold is set to 0.50, but we can change that, and if we do we get new values for precision and recall, which means we have to go back to comparing our models again. # # This is when the ROC plot can be helpful. Unlike precision and recall, ROC is not a single metric but a graph. The ROC curve illustrates the trade-off that happens as we change our threshold from zero to 1. The closer the ROC curve comes to the top-left corner of the plot (the $(0, 1)$ point), the better the classifier. If we have two classifiers $A$ and $B$, and the ROC curve of $A$ is higher than that of $B$ **at every point**, then classifier $A$ outperforms $B$ **regardless of what threshold we choose**. # + from sklearn.metrics import roc_curve, auc import matplotlib.pyplot as plt def plot_roc(models, model_names): plt.figure(0, figsize = [8, 7]).clf() plt.plot([0, 1], [0, 1],'r--') plt.xlim([0, 1]) plt.ylim([0, 1]) plt.ylabel('True Positive Rate') plt.xlabel('False Positive Rate') for ii, model in enumerate(models): y_prob_test = model.predict_proba(X_test_featurized)[:, 1] fpr, tpr, threshold = roc_curve(y_test, y_prob_test, pos_label = "yes") roc_auc = auc(fpr, tpr) plt.plot(fpr, tpr, label = "{} AUC = {:0.2f}".format(model_names[ii], roc_auc)) plt.legend(loc = 'lower right'); # - # An ROC curve for a single classifier is not very useful, but it's when we want to compare multiple classifiers that the ROC curve can save us a lot of time. Since creating an ROC curve can be tedious, the above cell has a function that does the hard work. When we call the function, all we need to do is give it the models we trained, in a list, and corresponding labels for each. In addition to plotting the ROC curve, the plot will also show the AUC (area under the ROC curve). The closer the AUC is to 1, the better the model. plot_roc([logit, knnb, dtree, svmc], ['logistic', 'k-nearest-nb', 'decision-tree', 'SVM']) # ## Multi-class classification # # We saw quite a few examples of training binary classification models. Now we're going to see examples of **multi-class classification**, namely when the number of classes is more than 2. As it turns out, multi-class classification isn't really that special. For example, one approach would be to build many classifiers, each one of which distinguishes one of the classes from the rest of them. 
This is referred to as **one-vs-all** or **one-vs-rest**. To get a prediction, we let each model predict with **soft predictions** and we predict the class to be whichever class obtained the highest probability. # # However, this can be very inefficient when the number of classes is high, and in some use-cases such as image classification the number of classes can be in the hundreds of thousands! As we will see in future lectures, neural networks can train multi-class classifier using a single model, which is a far superior approach than one-vs-rest. # # We train our multi-class classifier to predict the `job` column in the data. But because there are too many classes, we first reduce the number of classes by combining some of them. To do that, we use a remapping dictionary and pass it to the `replace` method of the `DataFrame`. We then change the target to be this new variable and drop the one-hot-encoded features related to `job` from the training and test data (otherwise we would be using `job` to predict `job` and we don't need ML to do that!). # + remap = {'entrepreneur': 'white-collar', 'housemaid': 'blue-collar', 'admin.': 'white-collar', 'management': 'white-collar', 'self-employed': 'self-emp', 'services': 'self-emp', 'student': 'unemployed', 'technician': 'blue-collar', 'unknown': 'self-emp'} y_train = X_train['job'].replace(remap) edu_onehot_cols = X_train_featurized.filter(like = 'job').columns print(edu_onehot_cols) X_train_featurized = X_train_featurized.drop(columns = edu_onehot_cols) y_test = X_test['job'].replace(remap) X_test_featurized = X_test_featurized.drop(columns = edu_onehot_cols) y_train.value_counts(normalize = True) # - # ## Multi-class logistic regression # # In many cases, we can run exact same code with run to train the binary classifier, and train a multi-class classifier instead. **Logistic regressoion** is a great example to try our multi-class classification on. The reason is that logistic regression is very efficient and is able to train a true multi-class classifier, and not one-vs-rest. # + from sklearn.linear_model import LogisticRegression logit = LogisticRegression(max_iter = 5000) logit.fit(X_train_featurized, y_train) y_hat_train = logit.predict(X_train_featurized) y_hat_test = logit.predict(X_test_featurized) # - # Multi-class classification adds its own set of complications when it comes to model evaluation. But let's begin with the easy part: **accuracy is still accuracy**. In other words, whether we predict two classes or many, accuracy is still the precentage of correct predictions. # + acc_train = accuracy_score(y_train, y_hat_train) * 100 acc_test = accuracy_score(y_test, y_hat_test) * 100 print("Accuracy on the training data: {:.0f}%.".format(acc_train)) print("Accuracy on the test data: {:.0f}%.".format(acc_test)) # - # But just as in the binary case, if the data has class imbalance, accuracy may paint too rosy a picture. One solution here is to use a **weighted accuracy** where the weights are chosen to give more importance to the classes we wish to emphasize. For example, in the next cell, we assgin `retired` and `unemployed` to have 10 times more weight than the remaining categories. We then measure weighted accuracy using the `sample_weight` argument. 
# + is_retired_or_self = y_test.isin(['retired', 'unemployed']) y_test_wt = is_retired_or_self * 100 + ~is_retired_or_self * 1 acc_test_wt = accuracy_score(y_test, y_hat_test, sample_weight = y_test_wt) * 100 print("Weighted cccuracy on the test data: {:.0f}%.".format(acc_test_wt)) # - # There are two things to note here: # - A weighted accuracy measure as computed above does not really have too much value in terms of explainability. They mostly serve us to have a measure to tune our models with. In other words, we can use something like the weighted accuracy as defined above to evaluate a few different models and see which one has the best performance. But the exact value of the weighted accuracy isn't something particularly noteworthy since our choice of weights wasn't something we put a lot of thought into. # - We can use weights at the time we **evaluate** a model, like we did in the above example, but we can also use weights at the time we **train** a model. Many algorithms such as `LogisticRegression` have an argument usally named `class_weight` which allows you to assign higher weights to certain classes so you can over-emphasize them **during training**. This way the model can focus on improving its predictions for those classes at the expense of the other classes. # What about measures like **precision** and **recall**? They still apply in a multi-class classification setting, but now we need to calculate precision and recall **for each class** and then average them out. To calculate precison and recall for each class, we use a **one-vs-rest** approach. To average out the values for each class, we can use a simple average (called a **macro average** here) which gives each class equal weights, or we can weigh classes by their sample size (called **support** in the results below) and caluclate a **weighted average**. # # Of course we don't need to do any of that manually (phew!): we can use the `classification_report` function for that. # + from sklearn.metrics import classification_report cl_report_train = classification_report(y_train, y_hat_train, zero_division = 0) cl_report_test = classification_report(y_test, y_hat_test, zero_division = 0) print("For training data:\n") print(cl_report_train) print("=====================================================\n") print("For test data:\n") print(cl_report_test) # - # One last word of caution about multi-class classification. We saw in the above example that `LogisticRegression` can easily and rather efficiently accomodate multi-class classification as well. However, not all algorithms are as generous! For example, the `SVC` algorithm we used in our binary classification example can also be used for multi-class classification, but at a great cost: as [explained here](https://scikit-learn.org/stable/modules/svm.html#multi-class-classification) `SVC` uses **one-vs-one** for multi-class classification. In other words, it builds a separate classifier to predict one class vs another class. If we have $m$ classes, we have $m \choose 2$ = $\frac{m!}{(m-2)!2!}$ (the left-hand-side reads **$m$ choose 2**) classifiers to build, which for even small values of $m$ can quickly get out of control. So it's important to read the documentation of each classifier to be aware of these limits. # ## Linear regression regressor # So far we've only seen classification algorithms. So it's time to change course and take a look at regression algorithms. For that we need to find a numeric target. We can use the `duration` column in the data as our target. 
# + y_train = X_train_featurized['duration'] X_train_featurized = X_train_featurized.drop(columns = 'duration') y_test = X_test_featurized['duration'] X_test_featurized = X_test_featurized.drop(columns = 'duration') # - # Other than changing the target from categorical to numeric, we don't have to do things very differently from before. The training and predicting part of the code remain very similar. # + from sklearn.linear_model import LinearRegression linreg = LinearRegression() linreg.fit(X_train_featurized, y_train) y_hat_train = linreg.predict(X_train_featurized) y_hat_test = linreg.predict(X_test_featurized) # - # We've almost reached the end of the notebook and are just starting to talk about regression. This is because regression algorithms are more straight-forward. None of the topics we covered when we talked about classifications are really relevant in regression. Instead, we have a short list of metrics that can be used to tell us how close the prediction comes to the actual value (**root mean squared error** or **mean absolute error**) or by how much we were able to reduce our **uncertainty** (variability) about the target by modeling it using the **featuers** ($R^2$ and **adjusted $R^2$). # + from sklearn.metrics import mean_squared_error rmse_train = mean_squared_error(y_train, y_hat_train) ** 0.5 rmse_test = mean_squared_error(y_test, y_hat_test) ** 0.5 print("RMSE on the training data: {:5.5f}.".format(rmse_train)) print("RMSE on the test data: {:5.5f}.".format(rmse_test)) # - # ### Exercise # # - Find the MAE (mean absolute error) of the model trained above. How does it compare to the RMSE? # - Find the $R^2$ (coefficient of determination) of the model trained above. How would you interpret this number? # - Find the correlation between the predicted and actual values. # - Show the distribution of the errors using `displot` in the `seaborn` package. What does the distribution suggest about the errors? # ### End of exercise # Of course there is always more we can be looking at if we want to get in the weeds. And model evaluation at the end of the day is similar to EDA (exploratory data analysis) in that you have a standard set of checks, but then you can get creative depending on what you're trying to answer. As an example, let's say we are wondering how our confidence about the prediction for `duration` depends on `marital`. To answer this we need to quantify what we mean by "confidence". That's simple: if our prediction is good then error should be low. So our confidence about the prediction can be measured using the standard deviation of the error. In other words, we can compute the standard deviation of the error grouped by `marital` to answer our question: X_test['error'] = y_test - y_hat_test # compute the error X_test['error'].groupby(X_test['marital']).std() # The above example shows the importance of having some ida **ahead-of-time** of what metrics should be used to evaluate the model. Whether it's one or a few standard metrics or some pre-defined custom metric, clarifying it ahead of time can save us some time and **prevent** us from having to go fishing for the best performance metric **after training**. In fact, doing so can be dangerous and result in over-fitting. In future classes, we learn that if we need to do this properly, in addition to the training and test set, we should also be using a **validation set**.
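# On that last point, here is a minimal sketch of carving out a validation set (an illustration only; the variable names are new and the topic is covered properly in later lessons). Two calls to `train_test_split` leave three disjoint sets: the validation set is where we shop around for metrics and tune models, and the test set stays untouched until the final check.
# +
from sklearn.model_selection import train_test_split

X_tmp, X_test_final, y_tmp, y_test_final = train_test_split(
    bank.drop(columns = "y"), bank["y"], test_size = 0.15, random_state = 42)
X_train_final, X_valid, y_train_final, y_valid = train_test_split(
    X_tmp, y_tmp, test_size = 0.15, random_state = 42)

print(f"Training data has {X_train_final.shape[0]} rows.")
print(f"Validation data has {X_valid.shape[0]} rows.")
print(f"Test data has {X_test_final.shape[0]} rows.")
# -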
lesson_9.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Initial imports import pandas as pd from pathlib import Path #Data Loading file_path = Path("crypto_data.csv") df_crypto = pd.read_csv(file_path) df_crypto.head() df_crypto.shape #Eliminate cryptocurrencies that don't trade df_crypto2 = df_crypto[df_crypto.IsTrading == True] df_crypto2.shape #drop the IsTrading column df_crypto3 = df_crypto2.drop(columns=["IsTrading"]) df_crypto3.shape #Drop any rows with null values df_crypto4 = df_crypto3.dropna() df_crypto4.shape # + #Drop the unnecessary columns df_crypto4.head() df_crypto5 = df_crypto4.drop(columns=["CoinName","Unnamed: 0"]) df_crypto5.shape df_crypto5.TotalCoinSupply = df_crypto5.TotalCoinSupply.astype(float) df_crypto5.dtypes # - # create dummy variables for categorical columns df_crypto6 = pd.get_dummies(df_crypto5) df_crypto6.shape # + from sklearn.preprocessing import StandardScaler from sklearn.decomposition import PCA pca = PCA(n_components=.90) df_crypto6_scaled = StandardScaler().fit_transform(df_crypto6) crypto6_pca = pca.fit_transform(df_crypto6_scaled) df_crypto6_pca = pd.DataFrame(data=crypto6_pca) df_crypto6_pca # + from sklearn.manifold import TSNE # Initialize t-SNE model tsne = TSNE(learning_rate=35) # Reduce dimensions tsne_features = tsne.fit_transform(crypto6_pca) # - tsne_features.shape tsne_df = pd.DataFrame(tsne_features, columns=['x','y']) tsne_df import matplotlib.pyplot as plt plt.scatter(tsne_df['x'], tsne_df['y']) plt.show() # + import numpy as np from sklearn.cluster import KMeans inertia = [] # Same as k = list(range(1, 11)) k = [1,2,3,4,5,6,7,8,9,10] # Looking for the best k for i in k: km = KMeans(n_clusters=i, random_state=0) km.fit(tsne_df) inertia.append(km.inertia_) # Define a DataFrame to plot the Elbow Curve elbow_data = {"k": k, "inertia": inertia} df_elbow = pd.DataFrame(elbow_data) plt.plot(df_elbow['k'], df_elbow['inertia']) plt.xticks(range(1,11)) plt.xlabel('Number of clusters') plt.ylabel('Inertia') plt.show()
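# A possible next step once the elbow curve suggests a value of k (a sketch; `k_chosen = 4` below is only a placeholder, use whatever the curve above indicates): fit KMeans with that k and color the t-SNE scatter by cluster label.
# +
k_chosen = 4  # placeholder; read the actual value off the elbow curve above

km = KMeans(n_clusters=k_chosen, random_state=0)
tsne_df['cluster'] = km.fit_predict(tsne_df[['x', 'y']])

plt.scatter(tsne_df['x'], tsne_df['y'], c=tsne_df['cluster'], cmap='viridis', s=10)
plt.title(f'K-Means clusters (k={k_chosen}) on the t-SNE projection')
plt.show()
# -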
Crypto data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8.10 64-bit # name: python3 # --- # + [markdown] id="Ja7zPK-Gvi7Z" # # Prompt Tuning # # # + id="cE2jNS5UMXKh" colab={"base_uri": "https://localhost:8080/"} outputId="525b14e8-82f3-471c-a90e-6a54dca12ca4" #@title Colab-specific setup #@markdown This will ask for you to log into Google Drive. #@markdown Click on the link and copy over your access token. import torch colab = 'google.colab' in str(get_ipython()) if colab: # !nvidia-smi gpu_type = torch.cuda.get_device_name(0) if gpu_type != 'Tesla T4': raise ValueError("Highly advised to use a T4.") # Setup for Colab only if colab: # !pip install git+https://github.com/finetuneanon/transformers@gpt-neo-localattention3 # !pip install git+https://github.com/corolla-johnson/mkultra.git#egg=mkultra --log PIP_LOG # !pip install gdown # !pip install datasets # !pip install tqdm # Add word wrapping to outputs from IPython.display import HTML, display def set_css(): display(HTML(''' <style> pre { white-space: pre-wrap; } </style> ''')) get_ipython().events.register('pre_run_cell', set_css) # If on Colab, mount your Google Drive first! if colab: from google.colab import drive drive.mount('/content/drive') # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="jFjyHZRoimo5" outputId="604d668e-54fd-4b26-a5a9-ab5d51beebb2" #@title (Optional) Grab text from Project Gutenberg #@markdown We'll be using "Alice's Adventures in Wonderland" by <NAME>. import requests, re data_str = requests.get("https://www.gutenberg.org/files/11/11-0.txt").content.decode("utf-8") # Do a little cleanup clean_data_str = data_str def regex_replace(str, regex, group, replacement): pat = re.compile(regex) while True: m = pat.search(str) if m is not None: str = str[:m.start(group)] + replacement + str[m.end(group):] else: break return str # Remove carriage returns clean_data_str = regex_replace(clean_data_str, r"\r", 0, "") # Replace single newlines with spaces clean_data_str = regex_replace(clean_data_str, r"\S(\n)\S", 1, " ") # Remove left quotes clean_data_str = regex_replace(clean_data_str, r"\u201C", 0, '"') # Remove right quotes clean_data_str = regex_replace(clean_data_str, r"\u201D", 0, '"') # Remove italics clean_data_str = regex_replace(clean_data_str, r"_", 0, '') # Remove header and footer clean_data_str = clean_data_str[1434:-18595] print(clean_data_str) with open("alice.txt", "w") as file: file.write(clean_data_str) # + id="YjCfqiHmy1fA" colab={"base_uri": "https://localhost:8080/", "height": 17} outputId="af6902ce-3a7b-41fc-890a-116073a26574" #@title Load tokenizer from transformers import GPT2TokenizerFast tokenizer = GPT2TokenizerFast.from_pretrained("gpt2") # + id="tWYdCgONMXKm" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="2f1f6c44-e2c9-4f92-9bdc-d810d2b3bc35" #-----------------------# # Training Parameters # #-----------------------# # Use a string to set the initial value of the soft prompt. # Be aware of the number of tokens. initial_prompt = "A surreal children's fantasy story set in a subterranean world populated by peculiar anthropomorphic creatures.\n" print(f"Initial prompt length: {len(tokenizer.encode(initial_prompt))} tokens") # Decide the length of your training blocks in tokens. 
# Safe sizes for gpt-neo-2.7B-halved: # - 700 on a Colab T4 (16GB) # - 400 on a Colab K80 (12GB) # - 32 on a GTX1080 (8GB) # If it seems a bit small, don't worry! # Soft prompts can be moved forward in context for the best effect. block_size = 700 # Name your soft prompt project. sp_name = 'alice-cyclic-dropout-2' # What's the name of model you'll be using? # e.g. gpt2, gpt2-large, gpt-neo-2.7B # (This will be added to the project directory and soft prompt name) model_name = 'gpt2' # Specify the model directory or huggingface name. model_dir = 'gpt2' # The above model_dir will download GPT2 1.5B from Huggingface as a baseline. # It is recommended to use finetuneanon's FP16 fork of gpt-neo-2.7B, which can be downloaded from this magnet link: # magnet:?xt=urn:btih:f50bb4e259d2f96aa9151443950b0d2b899a097c&dn=gpt-neo-2.7B-halved&tr=http%3A%2F%2Fopenbittorrent.com%3A80%2Fannounce&tr=http%3A%2F%2Ft.nyaatracker.com%3A80%2Fannounce&tr=udp%3A%2F%2Fopen.stealth.si%3A80%2Fannounce # Once you've saved it to your local machine, create a 'models' folder in your Google Drive and upload it there, # then uncomment the following: #model_dir = "/content/drive/MyDrive/models/gpt-neo-2.7B-halved/" # Should be 'gpt2' or 'gpt-neo'. model_type = 'gpt2' # Specify the path to the text file used for training. text_path = "alice.txt" # You can also use something uploaded to your Google Drive, e.g. #text_path = "/content/drive/MyDrive/datasets/nm_burning_chrome.txt" # Specify the project directory. project_dir = f"/content/drive/MyDrive/soft_prompts/{sp_name}-{model_name}/" # Checkpoint interval in steps. checkpoint_interval = 20 # Evaluation interval in steps. eval_interval = 5 # How many blocks to use for evaluation. eval_blocks = 16 # Adafactor hyperparameters optimizer_params = { # Fixed learning rate, recommend 1e-4 to 1e-3 "lr": 2e-4, # 1st momentum, recommend 0 "beta1": 0.0, # 2nd momentum decay schedule, recommend -0.3 (lower is slower) "decay_rate": -0.8, # Weight decay, recommend 1e-5 "weight_decay": 0.1, # Update scaling, recommend False "scale_parameter": False, # Built-in LR scheduler, recommend False "relative_step": False } # LR scheduler parameters scheduler_params = { "num_warmup_steps": 10, "num_cycles": 8, "num_training_steps": 400 } # (Use these for GPT-Neo) #scheduler_params = { # "num_warmup_steps": 10, # "num_cycles": 4, # "num_training_steps": 240 #} base_acc_steps = 16 acc_doubling_rate = 0 plateau_steps = 0 # + id="kEVELdEDttvO" colab={"base_uri": "https://localhost:8080/", "height": 17} cellView="form" outputId="2c85cad4-e91e-4ca5-bf0e-1ae2e3fc8d8b" #@title Load model from mkultra.tuning import GPTNeoPromptTuningLM, GPT2PromptTuningLM if 'model' not in globals(): if model_type == 'gpt2': model = GPT2PromptTuningLM.from_pretrained(model_dir).half().to("cuda") elif model_type == 'gpt-neo': model = GPTNeoPromptTuningLM.from_pretrained(model_dir).half().to("cuda") else: raise "Invalid model type" # + id="BZAT90ZCMXKo" cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 53} outputId="455d3100-04f5-49db-c96d-3e3b95ddf270" #@title Initialize project #@markdown This will load the latest checkpoint if the project directory already exists. 
from mkultra.soft_prompt import SoftPrompt from transformers import Adafactor import os filename_for_checkpoint = lambda step: f"{sp_name}-{model_name}-step-{step}.json" loaded_sp = None project_files = None # Look for existing project directory try: os.makedirs(project_dir) print(f"Created project directory at {project_dir}") except FileExistsError: print(f"Found project directory at {project_dir}") # Look for existing checkpoints project_files = os.listdir(project_dir) if project_files is not None: checkpoint_files = [check_file for check_file in project_files if ('-step-' in check_file) ] if len(checkpoint_files) > 0: highest_step = max([ int(check_file[check_file.rfind('-step-')+6:-5]) for check_file in checkpoint_files ]) loaded_sp = SoftPrompt.from_file( os.path.join(project_dir, filename_for_checkpoint(highest_step)) ) print(f"Loading latest checkpoint: {highest_step}") else: print("No checkpoints found") # + tags=[] id="pf7wZnxNtR29" colab={"base_uri": "https://localhost:8080/", "height": 124} cellView="form" outputId="0f1eb6bf-edce-43cb-9201-a5e995ea8879" #@title Process dataset #@markdown This will load an existing set #@markdown of tokens if present in the project directory. import json import math text_tokenized = None tokens_path = os.path.join(project_dir,"tokens.json") # See if we already have a tokens file try: with open(tokens_path, 'r', encoding='utf-8') as file: text_tokenized = json.load(file) print("Loaded existing tokens.json file") except FileNotFoundError: print("No tokens.json exists, creating it...") # If not, make one now if text_tokenized is None: with open(text_path, 'r', encoding='utf-8') as file: text = file.read() text_tokenized = tokenizer.encode(text) with open(tokens_path, 'x', encoding='utf-8') as file: json.dump(text_tokenized, file) text_length = len(text_tokenized) num_blocks = math.ceil(text_length/block_size) print(f"Length of text: {len(text_tokenized)} tokens") print(f"Number of blocks: {num_blocks}, each {block_size} tokens") # Partition tokens into blocks blocks = list() for block_num in range(num_blocks): start = block_num * block_size end = min(start + block_size, text_length) blocks.append( text_tokenized[start:end] ) block_order_path = os.path.join(project_dir, "block_order.json") # See if we already have a block_order file try: with open(block_order_path, 'r', encoding='utf-8') as file: block_order = json.load(file) print("Loaded existing block_order.json file") except FileNotFoundError: print("No block_order.json exists, creating it...") block_order = [*range(num_blocks)] with open(block_order_path, 'x', encoding='utf-8') as file: json.dump(block_order, file) # + id="TI_f3H5lXeLt" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="e03e52db-a742-4020-e671-129a27b00b1d" #@title Initialize soft prompt in model #@markdown If a checkpoint is present, use that. if loaded_sp is None: initial_sp = SoftPrompt.from_string(initial_prompt, model, tokenizer) print(f"Initial prompt length: {len(initial_sp)}") model.set_soft_prompt(initial_sp) sp_step = 0 eval_loss = 100 else: model.set_soft_prompt(loaded_sp) sp_step = loaded_sp._metadata['step'] eval_loss = loaded_sp._metadata['loss'] # + id="nYaLKE1YtR3C" colab={"base_uri": "https://localhost:8080/", "height": 17} outputId="dea1b23d-285b-4f5f-d242-834e63a03e3e" # Configure number of steps to train for. # One step is (acc_steps) forward passes. 
num_training_steps = scheduler_params['num_training_steps'] # + colab={"base_uri": "https://localhost:8080/", "height": 17} id="LlqN3HcGQMZ0" outputId="aeffd993-9b58-491a-8311-1e6db863ac21" from transformers import AdamW, Adafactor import transformers # Feed soft params to optimizer optimizer_params['params'] = [model.get_soft_params()] optimizer = Adafactor(**optimizer_params) optimizer.state['step'] = sp_step scheduler_params['optimizer'] = optimizer scheduler = transformers.get_cosine_with_hard_restarts_schedule_with_warmup(**scheduler_params) # + tags=["outputPrepend"] id="ImdPj_CftR3C" colab={"base_uri": "https://localhost:8080/", "height": 138, "referenced_widgets": ["563f05f9831a4913a9fb9fb7116374c4", "367016fea14b4eabaf0a1a1c424e63a4", "57bcb1b7ab324955ac48b7e22c51d695", "034feddd372b458d8e82848fc4db02fc", "870cc0cf45cb4895b400ac2c0d1df2d4", "7668769f21704c5b9903086b69c15029", "16ff7090ebcd4388b1a747060b99a327", "9396770b5f4842ac8e01c56dc5b80909"]} cellView="form" outputId="89ee86d1-4273-420a-bce4-37201109d3c0" #@title Train the soft prompt from tqdm.notebook import tqdm import random import torch import math torch.cuda.empty_cache() loss_log_path = os.path.join(project_dir,"loss_log.csv") bar = tqdm(total=num_training_steps) optimizer.state['step'] = sp_step evals_since_last_improvement = 0 best_eval = float('inf') # Fix eval order eval_order = [*range(num_blocks)] random.seed(1234) random.shuffle(eval_order) # Function for gradient accumulation scheduling def get_acc_steps(sp_step): if acc_doubling_rate != 0: return round(base_acc_steps * math.pow(2, (sp_step / acc_doubling_rate))) else: return base_acc_steps for session_step in range(num_training_steps): model.train() acc_steps = get_acc_steps(sp_step) for i in range(acc_steps): idx = (sp_step*acc_steps + i) % num_blocks # Shuffle blocks every epoch if idx == 0: random.shuffle(block_order) with open(block_order_path, 'w', encoding='utf-8') as file: json.dump(block_order, file) block = blocks[block_order[idx]] input_ids = torch.LongTensor(block).unsqueeze(0).cuda().detach() # Forward pass and optimize outputs = model(input_ids=input_ids, labels=input_ids) loss = outputs.loss loss.backward() instant_loss = loss.item() if math.isnan(instant_loss): torch.cuda.empty_cache() raise KeyboardInterrupt # Discard tensor that was moved to GPU del input_ids torch.cuda.empty_cache() # Accumulate gradients optimizer.step() lr = optimizer.param_groups[0]["lr"] scheduler.step() optimizer.zero_grad() if math.isnan(instant_loss): torch.cuda.empty_cache() raise KeyboardInterrupt # Evaluate model and plot loss if sp_step%eval_interval == 0: model.eval() torch.cuda.empty_cache() eval_loss = 0 with torch.no_grad(): for eval_step in range(eval_blocks): block = blocks[eval_order[eval_step]] input_ids = torch.LongTensor(block).unsqueeze(0).cuda().detach() eval_loss += model(input_ids=input_ids, labels=input_ids).loss.item() # Discard tensor that was moved to GPU del input_ids torch.cuda.empty_cache() eval_loss /= eval_blocks with open(loss_log_path, 'a', encoding='utf-8') as file: file.write(f"{sp_step},{eval_loss}\n") # Stop if loss has plateaued if plateau_steps != 0: if eval_loss < best_eval: best_eval = eval_loss evals_since_last_improvement = 0 else: evals_since_last_improvement += 1 if evals_since_last_improvement > plateau_steps: print(f"No improvement for {plateau_steps} evals") break # Save checkpoint every so often if sp_step%checkpoint_interval == 0: sp = SoftPrompt.from_tuning_model(model, {"name" : sp_name + f"-step-{sp_step}", "step" : 
sp_step, "loss" : eval_loss}) sp.to_file( os.path.join( project_dir,filename_for_checkpoint(sp_step) ) ) bar.set_postfix({ "Model Step" : sp_step, "Eval Loss" : "{el:.5f}".format(el=eval_loss), "Acc Steps" : acc_steps, "LR" : lr }) bar.update(1) sp_step += 1 # Save a checkpoint once done sp = SoftPrompt.from_tuning_model(model, {"name" : sp_name + f"-step-{sp_step}", "step" : sp_step, "loss" : eval_loss}) sp.to_file( os.path.join( project_dir,filename_for_checkpoint(sp_step) ) ) # + id="-UdO6kRiaptn" cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 17} outputId="d06311f9-23b6-4f74-83cc-4e899a9f9579" #@title Flush memory after interrupting training #@markdown This will *hopefully* prevent a CUDA out-of-memory error. try: del input_ids except Exception: pass torch.cuda.empty_cache() # + colab={"base_uri": "https://localhost:8080/", "height": 283} id="JPZpBzbvaoru" outputId="5c8fc5d2-ee82-4c94-c2a7-2d1e9eb52f5f" # Plot loss import matplotlib.pyplot as plt import matplotlib.cbook as cbook import numpy as np fname2 = cbook.get_sample_data(loss_log_path, asfileobj=False) with cbook.get_sample_data(loss_log_path) as file: array = np.loadtxt(file, delimiter=",") fig = plt.figure() plt.plot(array[:, 0], array[:, 1]) # + id="YKypaYDOtR3E" colab={"base_uri": "https://localhost:8080/", "height": 160} outputId="43002925-1b61-4775-9bbb-1415bfc72c46" # Try generating with your model model.eval() # Restore soft prompt from checkpoint # (Use above graph to find a good stopping point and check project directory for valid checkpoints) sp = SoftPrompt.from_file( os.path.join(project_dir, filename_for_checkpoint(400)) ) model.set_soft_prompt(sp) test = "Alice sipped her tea as the white rabbit gloated about his vast collection of pocket watches" call = tokenizer(test, return_tensors="pt").input_ids.cuda() basic_output = model.generate( input_ids=call, do_sample=True, min_length=call.shape[-1] + 200, max_length=call.shape[-1] + 200, temperature=1.0, tfs = 0.9, repetition_penalty = 3.0, pad_token_id=tokenizer.eos_token_id ) print(tokenizer.decode(basic_output[0])) # + colab={"base_uri": "https://localhost:8080/", "height": 160} id="4RnpmIbbjUTI" outputId="93d9b3d1-636c-413e-e86b-c20a1b3afe8d" model.eval() # Purge soft prompt for comparison. model.initialize_soft_prompt(n_tokens=1) test = "Alice sipped her tea as the white rabbit gloated about his vast collection of pocket watches" call = tokenizer(test, return_tensors="pt").input_ids.cuda() basic_output = model.generate( input_ids=call, do_sample=True, min_length=call.shape[-1] + 200, max_length=call.shape[-1] + 200, temperature=1.0, tfs = 0.9, repetition_penalty = 3.0, pad_token_id=tokenizer.eos_token_id ) print(tokenizer.decode(basic_output[0]))
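# A rough numeric follow-up to the two samples above: this sketch reuses the `blocks`, `eval_order`, checkpoint path, and mkultra calls already defined in this notebook (the step-400 checkpoint is the same assumption as in the generation cell) to score a single held-out block with and without the tuned soft prompt. A lower loss on one block is only a rough signal, not a proper evaluation.

# +
import torch

eval_block = blocks[eval_order[0]]
input_ids = torch.LongTensor(eval_block).unsqueeze(0).cuda()

# Loss with the tuned soft prompt attached
model.set_soft_prompt(SoftPrompt.from_file(
    os.path.join(project_dir, filename_for_checkpoint(400))))
model.eval()
with torch.no_grad():
    tuned_loss = model(input_ids=input_ids, labels=input_ids).loss.item()

# Loss with the soft prompt effectively removed (same trick as the comparison cell above)
model.initialize_soft_prompt(n_tokens=1)
with torch.no_grad():
    base_loss = model(input_ids=input_ids, labels=input_ids).loss.item()

print(f"Loss with soft prompt: {tuned_loss:.4f}, without: {base_loss:.4f}")

# Free the GPU copy of the block
del input_ids
torch.cuda.empty_cache()
# -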
tuning_finetune.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Predicting classes with a decision tree or a random forest

# ## Based on the tutorial below
# https://pchun.work/%e6%b1%ba%e5%ae%9a%e6%9c%a8%e3%81%a8%e3%83%a9%e3%83%b3%e3%83%80%e3%83%a0%e3%83%95%e3%82%a9%e3%83%ac%e3%82%b9%e3%83%88%e3%81%ab%e3%82%88%e3%82%8b%e3%83%87%e3%83%bc%e3%82%bf%e5%88%86%e9%a1%9e/

# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load

import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory

import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session

# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
# -*- coding: utf-8 -*-
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn import tree
# -

iris=load_iris()
x=iris.data
y=iris.target
x.shape,y.shape

train_x,test_x,train_y,test_y=train_test_split(x,y,test_size=0.3,random_state=0)
train_x.shape,test_x.shape

train_x[:5,:],y[:5]

dtc=tree.DecisionTreeClassifier(max_depth=3)
dtc.fit(train_x,train_y)
print(dtc.score(test_x,test_y))

# Predicted labels
print(dtc.predict(test_x))
# Actual labels
print(test_y)

from sklearn.model_selection import StratifiedKFold,cross_val_score
import numpy as np
# k=10: split the data into 10 folds for cross-validation
stratifiedkfold=StratifiedKFold(n_splits=10)
# For cross-validation, pass x and y directly instead of the training split, because cross_val_score splits training and validation data automatically
score=cross_val_score(dtc,x,y,cv=stratifiedkfold)
# Show each fold's score
print(score)
# Show the average score
print(np.mean(score))
dtc_score=np.mean(score)

# # Random Forest
# Bagging over many decision trees; the trees run in parallel, which makes it fast
# Even with many decision trees, it rarely overfits

# ### Build the model with cross-validation

from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split,cross_val_score,StratifiedKFold

iris=load_iris()
x=iris.data
y=iris.target
train_x,test_x,train_y,test_y=train_test_split(x,y,test_size=0.3,random_state=0)
stratifiedkfold=StratifiedKFold(n_splits=10)
rfc=RandomForestClassifier(max_depth=3,random_state=0)
# For cross-validation, pass x and y directly instead of the training split, because cross_val_score splits training and validation data automatically
score=cross_val_score(rfc,x,y,cv=stratifiedkfold)
print(score)
print(np.mean(score))
rfc_score=np.mean(score)

# ### Build the model with an ordinary fit/score split

rfc=RandomForestClassifier(max_depth=3,random_state=0)
rfc.fit(train_x,train_y)
score=rfc.score(test_x,test_y)
print(score)

# # Predicting with a neural network (MLPClassifier)

from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split,cross_val_score,StratifiedKFold

x=iris.data
y=iris.target
train_x,test_x,train_y,test_y=train_test_split(x,y,test_size=0.3,random_state=0)
train_x.shape,test_x.shape

mlpc=MLPClassifier(max_iter=10000,activation='relu')
mlpc.fit(train_x,train_y)
score=mlpc.score(test_x,test_y)
print(score)

# ### Build the model with cross-validation

stratifiedkfold=StratifiedKFold(n_splits=10)
mlpc=MLPClassifier(max_iter=10000,activation='relu')
score=cross_val_score(mlpc,x,y,cv=stratifiedkfold)
print(score)
print(np.mean(score))
mlpc_score=np.mean(score)

dic={'Decision Tree':dtc_score,'RandomForest':rfc_score,'MLPClassifier':mlpc_score}
dic_sort = sorted(dic.items(), key=lambda d: d[1], reverse=True)
for model_name, model_score in dic_sort:
    print("Model:{} Score:{}%".format(model_name, round(model_score, 4)*100))
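# Beyond the accuracy ranking above, it can help to see why the tree-based models decide the way they do. This sketch reuses the fitted `dtc`, `rfc`, and the iris data from above; `feature_importances_` and `tree.plot_tree` are standard scikit-learn features, so nothing here is specific to this notebook.

# +
import matplotlib.pyplot as plt

# Feature importances from the fitted random forest
for feature_name, importance in zip(iris.feature_names, rfc.feature_importances_):
    print("{}: {:.3f}".format(feature_name, importance))

# Visualize the fitted decision tree
plt.figure(figsize=(12, 8))
tree.plot_tree(dtc, feature_names=iris.feature_names,
               class_names=list(iris.target_names), filled=True)
plt.show()
# -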
experiment/practices-dicisiontreeclassifier-randomforest.ipynb