```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# TODO: Read in weight_loss.csv
# Assign variables to columns (weight_lost_a, weight_lost_b)

mean_group_a = np.mean(weight_lost_a)
mean_group_b = np.mean(weight_lost_b)

plt.hist(weight_lost_a)
plt.show()
plt.hist(weight_lost_b)
plt.show()

mean_difference = mean_group_b - mean_group_a
print(mean_difference)

# Observed mean difference between the groups
mean_difference = 2.52
# all_values is expected to hold the pooled weight-loss values from both groups
print(all_values)

mean_differences = []
for i in range(1000):
    group_a = []
    group_b = []
    for value in all_values:
        assignment_chance = np.random.rand()
        if assignment_chance >= 0.5:
            group_a.append(value)
        else:
            group_b.append(value)
    iteration_mean_difference = np.mean(group_b) - np.mean(group_a)
    mean_differences.append(iteration_mean_difference)

plt.hist(mean_differences)
plt.show()

sampling_distribution = {}
for df in mean_differences:
    if sampling_distribution.get(df, False):
        sampling_distribution[df] = sampling_distribution[df] + 1
    else:
        sampling_distribution[df] = 1

frequencies = []
for sp in sampling_distribution.keys():
    if sp >= 2.52:
        frequencies.append(sampling_distribution[sp])
p_value = np.sum(frequencies) / 1000
```

Chi-squared tests - creating distribution

```
chi_squared_values = []
from numpy.random import random
import matplotlib.pyplot as plt

for i in range(1000):
    sequence = random((32561,))
    sequence[sequence < .5] = 0
    sequence[sequence >= .5] = 1
    male_count = len(sequence[sequence == 0])
    female_count = len(sequence[sequence == 1])
    male_diff = (male_count - 16280.5) ** 2 / 16280.5
    female_diff = (female_count - 16280.5) ** 2 / 16280.5
    chi_squared = male_diff + female_diff
    chi_squared_values.append(chi_squared)

plt.hist(chi_squared_values)

chi_squared_values = []
from numpy.random import random
import matplotlib.pyplot as plt

# loop 1000 times
for i in range(1000):
    # numpy random generating 300 numbers between 0.0 and 1.0.
    # get a vector with 300 elements.
    sequence = random((300,))
    # if it is less than .5, replace it with 0
    sequence[sequence < .5] = 0
    # otherwise replace it with 1
    sequence[sequence >= .5] = 1
    # Compute the male_diff by subtracting the expected Male count (150)
    # from the observed Male count, squaring it,
    # and dividing by the expected Male count. Do the same for female_diff
    male_count = len(sequence[sequence == 0])
    female_count = len(sequence[sequence == 1])
    male_diff = (male_count - 150) ** 2 / 150
    female_diff = (female_count - 150) ** 2 / 150
    # find the chi squared
    chi_squared = male_diff + female_diff
    # append the values
    chi_squared_values.append(chi_squared)

plt.hist(chi_squared_values)

diffs = []
observed = [27816, 3124, 1039, 311, 271]
expected = [26146.5, 3939.9, 944.3, 260.5, 1269.8]
for i, obs in enumerate(observed):
    exp = expected[i]
    diff = (obs - exp) ** 2 / exp
    diffs.append(diff)
race_chisq = sum(diffs)

from scipy.stats import chisquare
observed = np.array([27816, 3124, 1039, 311, 271])
expected = np.array([26146.5, 3939.9, 944.3, 260.5, 1269.8])
chisquare_value, race_pvalue = chisquare(observed, expected)

# income refers to the census income DataFrame used in the original exercise (not defined above)
table = pd.crosstab(income["sex"], [income["race"]])
print(table)
```
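The final cell builds a sex-by-race contingency table with `pd.crosstab` but stops before testing it. As a hedged illustration (not part of the original notebook), the sketch below shows how `scipy.stats.chi2_contingency` runs a chi-squared test of independence on a small table; the counts here are made up purely for demonstration, and in practice the values of the crosstab above would be passed in instead.

```
import numpy as np
from scipy.stats import chi2_contingency

# Illustrative 2x3 contingency table (rows: sex, columns: race categories).
# Made-up counts for demonstration only.
observed = np.array([
    [350, 120, 30],
    [410, 140, 50],
])

chi2, p_value, dof, expected = chi2_contingency(observed)
print("chi-squared:", chi2)
print("p-value:", p_value)
print("degrees of freedom:", dof)
print("expected counts:\n", expected)
```

Unlike `chisquare`, which compares one observed vector against explicitly supplied expected counts, `chi2_contingency` derives the expected counts from the table's row and column totals.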
```
import numpy as np

docs = ["I enjoy playing TT", "I like playing TT"]

docs[0][0].split()

from sklearn.feature_extraction.text import CountVectorizer

vectorizer = CountVectorizer(min_df=0, token_pattern=r"\b\w+\b")
vectorizer.fit(docs)
print(vectorizer.vocabulary_)

# encode document
vector = vectorizer.transform(docs)

# summarize encoded vector
print(vector.shape)
print(type(vector))
print(vector.toarray())

print(vectorizer.vocabulary_)
print(vector.shape)
print(vector.toarray())

x = []
y = []
for i in range(len(docs)):
    for j in range(len(docs[i].split())):
        t_x = []
        t_y = []
        for k in range(4):
            if(j==k):
                t_y.append(docs[i].split()[k])
                continue
            else:
                t_x.append(docs[i].split()[k])
        x.append(t_x)
        y.append(t_y)

x
y

x2 = []
y2 = []
for i in range(len(x)):
    x2.append(' '.join(x[i]))
    y2.append(' '.join(y[i]))

x2
y2

vector_x = vectorizer.transform(x2)
vector_x.toarray()

vector_y = vectorizer.transform(y2)
vector_y.toarray()

from keras.models import Sequential
from keras.layers import Dense, Embedding
from keras.layers import LSTM, Bidirectional, Dropout
from keras import backend as K
from keras.layers.advanced_activations import LeakyReLU
from keras import regularizers

model = Sequential()
model.add(Dense(3, activation='linear', input_shape=(5,)))
model.add(Dense(5, activation='sigmoid'))
model.summary()

model.compile(loss='binary_crossentropy', optimizer='adam')
model.fit(vector_x, vector_y, epochs=1000, batch_size=4, verbose=1)

model.predict(vector_x)

[list(vectorizer.vocabulary_.keys())[0]]

vectorizer.transform([list(vectorizer.vocabulary_.keys())[1]]).toarray()

model.summary()

from keras.models import Model

layer_name = 'dense_1'
intermediate_layer_model = Model(inputs=model.input, outputs=model.get_layer(layer_name).output)

for i in range(len(vectorizer.vocabulary_)):
    word = list(vectorizer.vocabulary_.keys())[i]
    word_vec = vectorizer.transform([list(vectorizer.vocabulary_.keys())[i]]).toarray()
    print(word, '\t', intermediate_layer_model.predict(word_vec))
```

# Measuring similarity between word vectors

```
a = word = list(vectorizer.vocabulary_.keys())[1]
word_vec_a = intermediate_layer_model.predict(vectorizer.transform([list(vectorizer.vocabulary_.keys())[1]]).toarray())

b = word = list(vectorizer.vocabulary_.keys())[4]
word_vec_b = intermediate_layer_model.predict(vectorizer.transform([list(vectorizer.vocabulary_.keys())[4]]).toarray())

word_vec_a

np.sum(word_vec_a*word_vec_b)/((np.sqrt(np.sum(np.square(word_vec_a))))*np.sqrt(np.sum(np.square(word_vec_b))))

np.sum(np.square(word_vec_a - word_vec_b))
```
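The last two cells compute cosine similarity and squared Euclidean distance by hand. As a sketch of the same idea (not from the original notebook), the snippet below wraps the cosine formula in a small helper and cross-checks it against scikit-learn's `cosine_similarity`; the two example vectors are arbitrary stand-ins for word embeddings.

```
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

def cosine(u, v):
    # cos(u, v) = (u . v) / (||u|| * ||v||)
    return np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))

# Arbitrary example vectors standing in for two word embeddings
word_vec_a = np.array([0.2, -0.1, 0.4])
word_vec_b = np.array([0.1, 0.0, 0.5])

print(cosine(word_vec_a, word_vec_b))
# cosine_similarity expects 2D arrays of shape (n_samples, n_features)
print(cosine_similarity(word_vec_a.reshape(1, -1), word_vec_b.reshape(1, -1))[0, 0])
```

Both calls should agree; the manual formula in the notebook is exactly what `cosine_similarity` computes element by element.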
# Video Super Resolution with OpenVINO Super Resolution is the process of enhancing the quality of an image by increasing the pixel count using deep learning. This notebook applies Single Image Super Resolution (SISR) to frames in a 360p (480×360) video in 360p resolution. We use a model called [single-image-super-resolution-1032](https://github.com/openvinotoolkit/open_model_zoo/tree/develop/models/intel/single-image-super-resolution-1032) which is available from the Open Model Zoo. It is based on the research paper cited below. Y. Liu et al., ["An Attention-Based Approach for Single Image Super Resolution,"](https://arxiv.org/abs/1807.06779) 2018 24th International Conference on Pattern Recognition (ICPR), 2018, pp. 2777-2784, doi: 10.1109/ICPR.2018.8545760. **NOTE:** The Single Image Super Resolution (SISR) model used in this demo is not optimized for video. Results may vary depending on the video. We are looking for a more suitable Multi Image Super Resolution (MISR) model, so if you know of a great open source model, please let us know! You can start a [discussion](https://github.com/openvinotoolkit/openvino_notebooks/discussions) or create an [issue](https://github.com/openvinotoolkit/openvino_notebooks/issues) on GitHub. ## Preparation ### Imports ``` import os import time import urllib from pathlib import Path import cv2 import numpy as np from IPython.display import ( HTML, FileLink, Pretty, ProgressBar, Video, clear_output, display, ) from openvino.inference_engine import IECore from pytube import YouTube ``` ### Settings ``` # Device to use for inference. For example, "CPU", or "GPU" DEVICE = "CPU" # 1032: 4x superresolution, 1033: 3x superresolution MODEL_FILE = "model/single-image-super-resolution-1032.xml" model_name = os.path.basename(MODEL_FILE) model_xml_path = Path(MODEL_FILE).with_suffix(".xml") ``` ### Functions ``` def write_text_on_image(image: np.ndarray, text: str) -> np.ndarray: """ Write the specified text in the top left corner of the image as white text with a black border. :param image: image as numpy arry with HWC shape, RGB or BGR :param text: text to write :return: image with written text, as numpy array """ font = cv2.FONT_HERSHEY_PLAIN org = (20, 20) font_scale = 4 font_color = (255, 255, 255) line_type = 1 font_thickness = 2 text_color_bg = (0, 0, 0) x, y = org image = cv2.UMat(image) (text_w, text_h), _ = cv2.getTextSize( text=text, fontFace=font, fontScale=font_scale, thickness=font_thickness ) result_im = cv2.rectangle( img=image, pt1=org, pt2=(x + text_w, y + text_h), color=text_color_bg, thickness=-1 ) textim = cv2.putText( img=result_im, text=text, org=(x, y + text_h + font_scale - 1), fontFace=font, fontScale=font_scale, color=font_color, thickness=font_thickness, lineType=line_type, ) return textim.get() def load_image(path: str) -> np.ndarray: """ Loads an image from `path` and returns it as BGR numpy array. 
:param path: path to an image filename or url :return: image as numpy array, with BGR channel order """ if path.startswith("http"): # Set User-Agent to Mozilla because some websites block requests # with User-Agent Python request = urllib.request.Request(url=path, headers={"User-Agent": "Mozilla/5.0"}) response = urllib.request.urlopen(url=request) array = np.asarray(bytearray(response.read()), dtype="uint8") image = cv2.imdecode(buf=array, flags=-1) # Loads the image as BGR else: image = cv2.imread(filename=path) return image def convert_result_to_image(result) -> np.ndarray: """ Convert network result of floating point numbers to image with integer values from 0-255. Values outside this range are clipped to 0 and 255. :param result: a single superresolution network result in N,C,H,W shape """ result = result.squeeze(0).transpose(1, 2, 0) result *= 255 result[result < 0] = 0 result[result > 255] = 255 result = result.astype(np.uint8) return result ``` ## Load the Superresolution Model Load the model in Inference Engine with `ie.read_network` and load it to the specified device with `ie.load_network` ``` ie = IECore() net = ie.read_network(model=model_xml_path) exec_net = ie.load_network(network=net, device_name=DEVICE) ``` Get information about network inputs and outputs. The Super Resolution model expects two inputs: 1) the input image, 2) a bicubic interpolation of the input image to the target size 1920x1080. It returns the super resolution version of the image in 1920x1800. ``` # Network inputs and outputs are dictionaries. Get the keys for the # dictionaries. original_image_key = list(exec_net.input_info)[0] bicubic_image_key = list(exec_net.input_info)[1] output_key = list(exec_net.outputs.keys())[0] # Get the expected input and target shape. `.dims[2:]` returns the height # and width. OpenCV's resize function expects the shape as (width, height), # so we reverse the shape with `[::-1]` and convert it to a tuple input_height, input_width = tuple(exec_net.input_info[original_image_key].tensor_desc.dims[2:]) target_height, target_width = tuple(exec_net.input_info[bicubic_image_key].tensor_desc.dims[2:]) upsample_factor = int(target_height / input_height) print(f"The network expects inputs with a width of {input_width}, " f"height of {input_height}") print(f"The network returns images with a width of {target_width}, " f"height of {target_height}") print( f"The image sides are upsampled by a factor {upsample_factor}. " f"The new image is {upsample_factor**2} times as large as the " "original image" ) ``` ## Superresolution on Video Download a YouTube\* video with PyTube and enhance the video quality with superresolution. By default only the first 100 frames of the video are processed. Change NUM_FRAMES in the cell below to modify this. **Note:** - The resulting video does not contain audio. - The input video should be a landscape video and have an an input resultion of 360p (640x360) for the 1032 model, or 480p (720x480) for the 1033 model. ### Settings ``` VIDEO_DIR = "data" OUTPUT_DIR = "output" os.makedirs(name=str(OUTPUT_DIR), exist_ok=True) # Number of frames to read from the input video. Set to 0 to read all frames. NUM_FRAMES = 100 # The format for saving the result video's # vp09 is slow, but widely available. If you have FFMPEG installed, you can # change the FOURCC to `*"THEO"` to improve video writing speed FOURCC = cv2.VideoWriter_fourcc(*"vp09") ``` ### Download and Prepare Video ``` # Use pytube to download a video. It downloads to the videos subdirectory. 
# You can also place a local video there and comment out the following lines VIDEO_URL = "https://www.youtube.com/watch?v=V8yS3WIkOrA" yt = YouTube(VIDEO_URL) # Use `yt.streams` to see all available streams. See the PyTube documentation # https://python-pytube.readthedocs.io/en/latest/api.html for advanced # filtering options try: os.makedirs(name=VIDEO_DIR, exist_ok=True) stream = yt.streams.filter(resolution="360p").first() filename = Path(stream.default_filename.encode("ascii", "ignore").decode("ascii")).stem stream.download(output_path=OUTPUT_DIR, filename=filename) print(f"Video {filename} downloaded to {OUTPUT_DIR}") # Create Path objects for the input video and the resulting videos video_path = Path(stream.get_file_path(filename, OUTPUT_DIR)) except Exception: # If PyTube fails, use a local video stored in the VIDEO_DIR directory video_path = Path(rf"{VIDEO_DIR}/CEO Pat Gelsinger on Leading Intel.mp4") # Path names for the result videos superres_video_path = Path(f"{OUTPUT_DIR}/{video_path.stem}_superres.mp4") bicubic_video_path = Path(f"{OUTPUT_DIR}/{video_path.stem}_bicubic.mp4") comparison_video_path = Path(f"{OUTPUT_DIR}/{video_path.stem}_superres_comparison.mp4") # Open the video and get the dimensions and the FPS cap = cv2.VideoCapture(filename=str(video_path)) ret, image = cap.read() if not ret: raise ValueError(f"The video at '{video_path}' cannot be read.") fps = cap.get(cv2.CAP_PROP_FPS) original_frame_height, original_frame_width = image.shape[:2] cap.release() print( f"The input video has a frame width of {original_frame_width}, " f"frame height of {original_frame_height} and runs at {fps:.2f} fps" ) ``` Create superresolution video, bicubic video and comparison video. The superresolution video contains the enhanced video, upsampled with superresolution, the bicubic video is the input video upsampled with bicubic interpolation, the combination video sets the bicubic video and the superresolution side by side. ``` superres_video = cv2.VideoWriter( filename=str(superres_video_path), fourcc=FOURCC, fps=fps, frameSize=(target_width, target_height), ) bicubic_video = cv2.VideoWriter( filename=str(bicubic_video_path), fourcc=FOURCC, fps=fps, frameSize=(target_width, target_height), ) comparison_video = cv2.VideoWriter( filename=str(comparison_video_path), fourcc=FOURCC, fps=fps, frameSize=(target_width * 2, target_height), ) ``` ### Do Inference Read video frames and enhance them with superresolution. Save the superresolution video, the bicubic video and the comparison video to file. The code in this cell reads the video frame by frame. Each frame is resized and reshaped to network input shape and upsampled with bicubic interpolation to target shape. Both the original and the bicubic image are propagated through the network. The network result is a numpy array with floating point values, with a shape of (1,3,1920,1080). This array is converted to an 8-bit image with shape (1080,1920,3) and written to `superres_video`. The bicubic image is written to `bicubic_video` for comparison. Lastly, the bicubic and result frames are combined side by side and written to `comparison_video`. A progress bar shows the progress of the process. Inference time is measured, as well as total time to process each frame, which includes inference time as well as the time it takes to process and write the video. 
``` start_time = time.perf_counter() frame_nr = 1 total_inference_duration = 0 total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT) if NUM_FRAMES == 0 else NUM_FRAMES progress_bar = ProgressBar(total=total_frames) progress_bar.display() cap = cv2.VideoCapture(filename=str(video_path)) try: while cap.isOpened(): ret, image = cap.read() if not ret: cap.release() break if NUM_FRAMES > 0 and frame_nr == NUM_FRAMES: break # Resize the input image to network shape and convert from (H,W,C) to # (N,C,H,W) resized_image = cv2.resize(src=image, dsize=(input_width, input_height)) input_image_original = np.expand_dims(resized_image.transpose(2, 0, 1), axis=0) # Resize and reshape the image to the target shape with bicubic # interpolation bicubic_image = cv2.resize( src=image, dsize=(target_width, target_height), interpolation=cv2.INTER_CUBIC ) input_image_bicubic = np.expand_dims(bicubic_image.transpose(2, 0, 1), axis=0) # Do inference inference_start_time = time.perf_counter() result = exec_net.infer( inputs={ original_image_key: input_image_original, bicubic_image_key: input_image_bicubic, } )[output_key] inference_stop_time = time.perf_counter() inference_duration = inference_stop_time - inference_start_time total_inference_duration += inference_duration # Transform inference result into an image result_frame = convert_result_to_image(result=result) # Write resulting image and bicubic image to video superres_video.write(image=result_frame) bicubic_video.write(image=bicubic_image) stacked_frame = np.hstack((bicubic_image, result_frame)) comparison_video.write(image=stacked_frame) frame_nr = frame_nr + 1 # Update progress bar and status message progress_bar.progress = frame_nr progress_bar.update() if frame_nr % 10 == 0: clear_output(wait=True) progress_bar.display() display( Pretty( f"Processed frame {frame_nr}. Inference time: " f"{inference_duration:.2f} seconds " f"({1/inference_duration:.2f} FPS)" ) ) except KeyboardInterrupt: print("Processing interrupted.") finally: superres_video.release() bicubic_video.release() comparison_video.release() end_time = time.perf_counter() duration = end_time - start_time print(f"Video's saved to {comparison_video_path.parent} directory.") print( f"Processed {frame_nr} frames in {duration:.2f} seconds. Total FPS " f"(including video processing): {frame_nr/duration:.2f}. " f"Inference FPS: {frame_nr/total_inference_duration:.2f}." ) ``` ### Show Side-by-Side Video of Bicubic and Superresolution Version ``` if not comparison_video_path.exists(): raise ValueError("The comparison video does not exist.") else: video_link = FileLink(comparison_video_path) video_link.html_link_str = "<a href='%s' download>%s</a>" display( HTML( f"Showing side by side comparison. If you cannot see the video in " "your browser, please click on the following link to download " f"the video<br>{video_link._repr_html_()}" ) ) display(Video(comparison_video_path, width=800, embed=True)) ```
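The comparison video makes the difference visible, but it can also be quantified. The helper below is an addition (not part of the original notebook): a plain-NumPy PSNR function that could be applied to pairs such as `bicubic_image` and `result_frame` from the inference loop. Here it runs on synthetic arrays so the cell is self-contained; the function name `psnr` and the example frames are my own.

```
import numpy as np

def psnr(reference: np.ndarray, test: np.ndarray, max_value: float = 255.0) -> float:
    """Peak signal-to-noise ratio between two images of equal shape."""
    mse = np.mean((reference.astype(np.float64) - test.astype(np.float64)) ** 2)
    if mse == 0:
        return float("inf")
    return 10 * np.log10((max_value ** 2) / mse)

# Synthetic stand-ins for a reference frame and an upsampled frame
np.random.seed(0)
frame_a = np.random.randint(0, 256, size=(360, 640, 3), dtype=np.uint8)
noise = np.random.randint(-5, 6, size=frame_a.shape)
frame_b = np.clip(frame_a.astype(np.int16) + noise, 0, 255).astype(np.uint8)

print(f"PSNR: {psnr(frame_a, frame_b):.2f} dB")
```

Higher PSNR means the test frame is closer to the reference; comparing the bicubic and super-resolution outputs against an original high-resolution frame (when one is available) gives a rough numeric sense of the improvement.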
**Reinforcement Learning with TensorFlow & TRFL: Q Learning** * This notebook shows how to apply the classic Reinforcement Learning (RL) idea of Q learning with TRFL. * In TD learning we estimated state values: V(s). In Q learning we estimate action values: Q(s,a). Here we'll go over Q learning in the simple tabular case. Next section we will use this same Q learning function in powerful Deep Learning algorithms like Deep Q Network. * A key concept in RL is exploration. We'll introduce and use epsilon greedy exploration, which is often used with Q learning. Outline: 1. Install TRFL 2. Define the GridWorld environment 3. Discuss Epsilon-Greedy Exploration 4. Find the value of each state-action value in the environment using Q learning ``` #TRFL has issues on Colab with TensorFlow version tensorflow-1.13.0rc1 #install TensorFlow 1.12 and restart run time !pip install tensorflow==1.12 import os os.kill(os.getpid(), 9) #install TRFL !pip install trfl==1.0 #install Tensorflow Probability !pip install tensorflow-probability==0.5.0 ``` **GridWorld** The GridWorld environment is a four by four grid. The agent randomly starts on the grid and can move either up, left, right, or down. If the agent reaches the upper left or lower right the episode is over. Every action the agent takes gets a reward of -1 until you reach the upper left or over right. ``` #Environment from: https://github.com/dennybritz/reinforcement-learning/blob/cee9e78652f8ce98d6079282daf20680e5e17c6a/lib/envs/gridworld.py #define the environment import io import numpy as np import sys from gym.envs.toy_text import discrete import pprint UP = 0 RIGHT = 1 DOWN = 2 LEFT = 3 class GridworldEnv(discrete.DiscreteEnv): """ Grid World environment from Sutton's Reinforcement Learning book chapter 4. You are an agent on an MxN grid and your goal is to reach the terminal state at the top left or the bottom right corner. For example, a 4x4 grid looks as follows: T o o o o x o o o o o o o o o T x is your position and T are the two terminal states. You can take actions in each direction (UP=0, RIGHT=1, DOWN=2, LEFT=3). Actions going off the edge leave you in your current state. You receive a reward of -1 at each step until you reach a terminal state. 
""" metadata = {'render.modes': ['human', 'ansi']} def __init__(self, shape=[4,4]): if not isinstance(shape, (list, tuple)) or not len(shape) == 2: raise ValueError('shape argument must be a list/tuple of length 2') self.shape = shape nS = np.prod(shape) nA = 4 MAX_Y = shape[0] MAX_X = shape[1] P = {} grid = np.arange(nS).reshape(shape) it = np.nditer(grid, flags=['multi_index']) while not it.finished: s = it.iterindex y, x = it.multi_index # P[s][a] = (prob, next_state, reward, is_done) P[s] = {a : [] for a in range(nA)} is_done = lambda s: s == 0 or s == (nS - 1) reward = 0.0 if is_done(s) else -1.0 #reward = 1.0 if is_done(s) else 0.0 # We're stuck in a terminal state if is_done(s): P[s][UP] = [(1.0, s, reward, True)] P[s][RIGHT] = [(1.0, s, reward, True)] P[s][DOWN] = [(1.0, s, reward, True)] P[s][LEFT] = [(1.0, s, reward, True)] # Not a terminal state else: ns_up = s if y == 0 else s - MAX_X ns_right = s if x == (MAX_X - 1) else s + 1 ns_down = s if y == (MAX_Y - 1) else s + MAX_X ns_left = s if x == 0 else s - 1 P[s][UP] = [(1.0, ns_up, reward, is_done(ns_up))] P[s][RIGHT] = [(1.0, ns_right, reward, is_done(ns_right))] P[s][DOWN] = [(1.0, ns_down, reward, is_done(ns_down))] P[s][LEFT] = [(1.0, ns_left, reward, is_done(ns_left))] it.iternext() # Initial state distribution is uniform isd = np.ones(nS) / nS # We expose the model of the environment for educational purposes # This should not be used in any model-free learning algorithm self.P = P super(GridworldEnv, self).__init__(nS, nA, P, isd) def _render(self, mode='human', close=False): """ Renders the current gridworld layout For example, a 4x4 grid with the mode="human" looks like: T o o o o x o o o o o o o o o T where x is your position and T are the two terminal states. """ if close: return outfile = io.StringIO() if mode == 'ansi' else sys.stdout grid = np.arange(self.nS).reshape(self.shape) it = np.nditer(grid, flags=['multi_index']) while not it.finished: s = it.iterindex y, x = it.multi_index if self.s == s: output = " x " elif s == 0 or s == self.nS - 1: output = " T " else: output = " o " if x == 0: output = output.lstrip() if x == self.shape[1] - 1: output = output.rstrip() outfile.write(output) if x == self.shape[1] - 1: outfile.write("\n") it.iternext() pp = pprint.PrettyPrinter(indent=2) ``` **An Introduction to Exploration: Epsilon-Greedy Exploration** Exploration is a key concept in RL. In order to find the best policies, an agent needs to explore the environment. By exploring, the agent can experience new states and rewards. In the last notebook, the agent explored GridWorld by taking a random action at every step. While random action explorations can work in some environments, the downside is the agent can spend too much time exploring bad states or states that have already been explored fully and not enough time exploring promising states. A simple--yet surprisingly effective--approach to exploration is Epsilon-Greedy exploration. A epsilon percentage of the time, the agent chooses a random action. The remaining amount of the time (1-epsilon) the agent choose the best estimated action aka the* greedy action*. Epsilon can be a fixed value between 0 and 1 or can start at a high value and gradually decay over time (ie start at .99 and decay to 0.01). In this notebook we will used a fixed epsilon value of 0.1. Below is a simple example of epsilon-greedy exploration. 
``` #declare the environment env = GridworldEnv() #reset the environment and get the agent's current position (observation) current_state = env.reset() env._render() print("") action_dict = {0:"UP",1:"RIGHT", 2:"DOWN",3:"LEFT"} greedy_dict = {0:3,1:3,2:3,3:3, 4:0,5:0,6:0,7:0, 8:2,9:2,10:2,11:2, 12:1,13:1,14:1,15:1} epsilon = 0.1 for i in range(10): #choose random action epsilon amount of the time if np.random.rand() < epsilon: action = env.action_space.sample() action_type = "random" else: #Choose a greedy action. We will learn greedy actions with Q learning in the following cells. action = greedy_dict[current_state] action_type = "greedy" current_state,reward,done,info = env.step(action) print("Agent took {} action {} and is now in state {} ".format(action_type, action_dict[action], current_state)) env._render() print("") if done: print("Agent reached end of episode, resetting the env") print(env.reset()) print("") env._render() print("") ``` ** TRFL Usage ** Once again, the three main TRFL steps are: 1. In the TensorFlow graph, define the necessary TensorFlow tensors 2. In the graph, feed the tensors into the trfl method 3. In the TensorFlow session, run the graph operation We saw this in the last notebook. Here in Q learning there are some slight differences. We use the trfl.qlearning() method and we input the action and action values (instead of state values) into the method. Note for the action values q_t and q_next_t the shape is batch size X number of actions. ``` #set up TRFL graph import tensorflow as tf import trfl #https://github.com/deepmind/trfl/blob/master/docs/trfl.md#qlearningq_tm1-a_tm1-r_t-pcont_t-q_t-nameqlearning # Args: # q_tm1: Tensor holding Q-values for first timestep in a batch of transitions, shape [B x num_actions]. # a_tm1: Tensor holding action indices, shape [B]. # r_t: Tensor holding rewards, shape [B]. # pcont_t: Tensor holding pcontinue values, shape [B]. # q_t: Tensor holding Q-values for second timestep in a batch of transitions, shape [B x num_actions]. # name: name to prefix ops created within this op. num_actions = env.action_space.n batch_size = 1 q_t = tf.placeholder(dtype=tf.float32,shape=[batch_size,num_actions],name="q_value") action_t = tf.placeholder(dtype=tf.int32,shape=[batch_size],name="action") reward_t = tf.placeholder(dtype=tf.float32,shape=[batch_size],name='reward') gamma_t = tf.placeholder(dtype=tf.float32,shape=[batch_size],name='discount_factor') q_next_t= tf.placeholder(dtype=tf.float32,shape=[batch_size,num_actions],name='q_next_value') qloss_t, q_extra_t = trfl.qlearning(q_t,action_t,reward_t,gamma_t,q_next_t) ``` ** The RL Training Loop ** In the next cell we are going to define the training loop and then run it in the following cell. The goal is to estimate the action value of each state (the value of each state-action combination) using Q learning. action_value_array holds the estimated values. After each step the agent takes in the env, we update the action_value_array with the Q learning formula. ** TRFL Usage ** The TRFL usage here is to run the trfl operation q_learning_t in sess.run(). We then take the output (q_learning_output) and extract the td_error part of that tensor. Using the td_error we update the action_value_array. For reference, the code below shows the full output of trfl.qlearning and the classic RL method of performing tabular Q learning updates. ``` def q_learning_action_value_estimate(env,episodes=1000,alpha=0.05,discount_factor=1.0,epsilon=0.1): """ Args: env: OpenAI env. 
env.P represents the transition probabilities of the environment. env.P[s][a] is a list of transition tuples (prob, next_state, reward, done). env.nS is a number of states in the environment. env.nA is a number of actions in the environment. episodes: number of episodes to run alpha: learning rate for state value updates discount_factor: Gamma discount factor. pcont_t TRFL argument Returns: Value of each state with random policy """ with tf.Session() as sess: #initialize the estimated state values to zero action_value_array = np.zeros((env.nS,env.nA)) #reset the env current_state = env.reset() #env._render() #run through each episode taking a random action each time #upgrade estimated state value after each action current_episode = 0 while current_episode < episodes: #choose action based on epsilon-greedy policy if np.random.rand() < epsilon: eg_action = env.action_space.sample() else: #Choose a greedy action. We will learn greedy actions with Q learning in the following cells. eg_action = np.argmax(action_value_array[current_state]) #take a step using epsilon-greedy action next_state, rew, done, info = env.step(eg_action) #run TRFL operation in the session q_learning_output = sess.run([q_extra_t],feed_dict={q_t:np.expand_dims(action_value_array[current_state],axis=0), action_t:np.expand_dims(eg_action,axis=0), reward_t:np.expand_dims(rew,axis=0), gamma_t:np.expand_dims(discount_factor,axis=0), q_next_t:np.expand_dims(action_value_array[next_state],axis=0)}) # trfl.qlearning() returns: # A namedtuple with fields: # loss: a tensor containing the batch of losses, shape [B]. # extra: a namedtuple with fields: # target: batch of target values for q_tm1[a_tm1], shape [B]. # td_error: batch of temporal difference errors, shape [B]. # Here we are using the td_error to update our action values. We will use the loss with a gradient descent optimizer in Deep Q Network session. #Use the Q learning TD error to update estimated state-action values action_value_array[current_state,eg_action] = action_value_array[current_state,eg_action] + alpha * q_learning_output[0].td_error #For reference, here is the tabular Q learning update method # max_q_value = np.max(action_value_array[next_state]) # action_value_array[current_state,eg_action] = action_value_array[current_state,eg_action] + \ # alpha * (rew + discount_factor*max_q_value - action_value_array[current_state,eg_action]) #if the epsiode is done, reset the env, if not the next state becomes the current state and the loop repeats if done: current_state = env.reset() current_episode += 1 else: current_state = next_state return action_value_array #run episodes with Q learning and get the state value estimates action_values = q_learning_action_value_estimate(env,episodes=2000,alpha=0.1) print("All Action Value Estimates:") print(np.round(action_values.reshape((16,4)),1)) print("each row is a state, each column is an action") print("") optimal_action_estimates = np.max(action_values,axis=1) print("Optimal Action Value Estimates:") print(np.round(optimal_action_estimates.reshape(env.shape),1)) print("estimate of the optimal State value at each state") print("") ``` The first output shows the estimated value for each action in each state. Ie row 4 column 4 is the value if the agent was in the upper right grid cell and took that action left. In the second output, we take the best action for each of the 16 states and show the agent's estimate of the state value assuming the agent always acts greedily. ``` ```
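Since the second output takes the maximum over actions, the corresponding argmax gives the greedy policy itself. The cell below is a small add-on sketch (not from the original notebook) that turns an action-value table of shape (16, 4) into arrows on the 4x4 grid. It uses a random table here so it runs standalone, but the `action_values` array from the training cell could be passed in directly.

```
import numpy as np

def greedy_policy_arrows(action_value_array, shape=(4, 4)):
    # Action indices follow the environment: 0=UP, 1=RIGHT, 2=DOWN, 3=LEFT
    arrows = np.array(["^", ">", "v", "<"])
    greedy_actions = np.argmax(action_value_array, axis=1)
    return arrows[greedy_actions].reshape(shape)

# Random stand-in for the learned action values (16 states x 4 actions)
demo_action_values = np.random.rand(16, 4)
print(greedy_policy_arrows(demo_action_values))
```

With the learned values, the arrows in the terminal corners are meaningless (those states are absorbing), while the remaining cells should point along a shortest path toward the nearer terminal state.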
# Tensorboard example ``` import time from collections import namedtuple import numpy as np import tensorflow as tf with open('anna.txt', 'r') as f: text=f.read() vocab = set(text) vocab_to_int = {c: i for i, c in enumerate(vocab)} int_to_vocab = dict(enumerate(vocab)) encoded = np.array([vocab_to_int[c] for c in text], dtype=np.int32) text[:100] encoded[:100] ``` Since the network is working with individual characters, it's similar to a classification problem in which we are trying to predict the next character from the previous text. Here's how many 'classes' our network has to pick from. ``` len(vocab) def get_batches(arr, n_seqs, n_steps_per_seq): '''Create a generator that returns batches of size n_seqs x n_steps from arr. Arguments --------- arr: Array you want to make batches from n_seqs: Batch size, the number of sequences per batch n_steps: Number of sequence steps per batch ''' # Get the batch size and number of batches we can make # ie n_seq = 10, n_steps_per_sew = 2, batch_size = 20 batch_size = n_seqs * n_steps_per_seq # ie arr= 40, over 20, so 2 batches n_batches = len(arr) // batch_size # Keep only enough characters to make full batches # n_batches = 2 * batch_size = 20 = 40?? # why not simply use len(arr)? arr = arr[ : n_batches * batch_size] # Reshape into n_seqs rows arr = arr.reshape((n_seqs, -1)) for n in range(0, arr.shape[1], n_steps_per_seq): # The features x = arr[ :, n: n + n_steps_per_seq] # The targets, shifted by one y = np.zeros_like(x) y[ :, : -1], y[ : , -1] = x[ :, 1: ], x[ :, 0] yield x, y batches = get_batches(encoded, 10, 50) x, y = next(batches) def build_inputs(batch_size, num_steps): ''' Define placeholders for inputs, targets, and dropout Arguments --------- batch_size: Batch size, number of sequences per batch num_steps: Number of sequence steps in a batch ''' with tf.name_scope('inputs'): # Declare placeholders we'll feed into the graph inputs = tf.placeholder(tf.int32, (batch_size, num_steps), name="inputs") targets = tf.placeholder(tf.int32, (batch_size, num_steps), name="targets") # Keep probability placeholder for drop out layers keep_prob = tf.placeholder(tf.float32, name='keep_prob') return inputs, targets, keep_prob def single_lstm_cell(lstm_size, keep_prob): with tf.name_scope("RNN_layers"): lstm = tf.contrib.rnn.NASCell(lstm_size, reuse = tf.get_variable_scope().reuse) # Add dropout to the cell outputs drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob = keep_prob) return drop def build_lstm(lstm_size, num_layers, batch_size, keep_prob): ''' Build LSTM cell. Arguments --------- keep_prob: Scalar tensor (tf.placeholder) for the dropout keep probability lstm_size: Size of the hidden layers in the LSTM cells num_layers: Number of LSTM layers batch_size: Batch size ''' ### Build the LSTM Cell # Stack up multiple LSTM layers, for deep learning with tf.name_scope("RNN_layers"): rnn_cells = tf.contrib.rnn.MultiRNNCell([single_lstm_cell(lstm_size, keep_prob) for _ in range(num_layers)], state_is_tuple = True) with tf.name_scope("RNN_init_state"): initial_state = rnn_cells.zero_state(batch_size, tf.float32) return rnn_cells, initial_state def build_output(lstm_output, in_size, out_size): ''' Build a softmax layer, return the softmax output and logits. Arguments --------- lstm_output: List of output tensors from the LSTM layer in_size: Size of the input tensor, for example, size of the LSTM cells out_size: Size of this softmax layer ''' # Reshape output so it's a bunch of rows, one row for each step for each sequence. 
# Concatenate lstm_output over axis 1 (the columns) # ie t1 = t1 = [[1, 2, 3], [4, 5, 6]] # t2 = [[7, 8, 9], [10, 11, 12]] # tf.concat([t1, t2], 1) ==> [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]] seq_output = tf.concat(lstm_output, axis=1) # Reshape seq_output to a 2D tensor with lstm_size columns x = tf.reshape(lstm_output, [-1, in_size]) # Connect the RNN outputs to a softmax layer with tf.variable_scope('softmax'): # Create the weight and bias variables here softmax_w = tf.Variable(tf.truncated_normal( (in_size, out_size), stddev=0.1)) softmax_b = tf.Variable(tf.zeros( out_size )) # tensorboard tf.summary.histogram("softmax_w", softmax_w) # Since output is a bunch of rows of RNN cell outputs, logits will be a bunch # of rows of logit outputs, one for each step and sequence logits = tf.matmul(x, softmax_w) + softmax_b # Use softmax to get the probabilities for predicted characters out = tf.nn.softmax(logits, name="predictions") tf.summary.histogram("predictions", out) return out, logits def build_loss(logits, targets, lstm_size, num_classes): ''' Calculate the loss from the logits and the targets. Arguments --------- logits: Logits from final fully connected layer targets: Targets for supervised learning lstm_size: Number of LSTM hidden units num_classes: Number of classes in targets ''' # One-hot encode targets and reshape to match logits, one row per sequence per step y_one_hot = tf.one_hot(targets, num_classes) y_reshaped = tf.reshape( y_one_hot, logits.get_shape() ) # Softmax cross entropy loss loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped) loss = tf.reduce_mean(loss) # tensorboard tf.summary.scalar('loss', loss) return loss def build_optimizer(loss, learning_rate, grad_clip): ''' Build optmizer for training, using gradient clipping. 
Arguments: loss: Network loss learning_rate: Learning rate for optimizer ''' # Optimizer for training, using gradient clipping to control exploding gradients tvars = tf.trainable_variables() grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), grad_clip) train_op = tf.train.AdamOptimizer(learning_rate) optimizer = train_op.apply_gradients(zip(grads, tvars)) return optimizer class CharRNN: def __init__(self, num_classes, batch_size=64, num_steps=50, lstm_size=128, num_layers=2, learning_rate=0.001, grad_clip=5, sampling=False): # When we're using this network for sampling later, we'll be passing in # one character at a time, so providing an option for that if sampling == True: batch_size, num_steps = 1, 1 else: batch_size, num_steps = batch_size, num_steps tf.reset_default_graph() # Build the input placeholder tensors self.inputs, self.targets, self.keep_prob = build_inputs(batch_size, num_steps) x_one_hot = tf.one_hot(self.inputs, num_classes, name="x_one_hot") with tf.name_scope("RNN_layers"): # Build the LSTM cell cells, self.initial_state = build_lstm(lstm_size, num_layers, batch_size, self.keep_prob) ### Run the data through the RNN layers with tf.name_scope("RNN_forward"): # Run each sequence step through the RNN with tf.nn.dynamic_rnn outputs, state = tf.nn.dynamic_rnn(cells, x_one_hot, initial_state=self.initial_state) self.final_state = state # Get softmax predictions and logits self.prediction, self.logits = build_output(outputs, lstm_size, num_classes) # Loss and optimizer (with gradient clipping) self.loss = build_loss(self.logits, self.targets, lstm_size, num_classes) self.optimizer = build_optimizer(self.loss, learning_rate, grad_clip) batch_size = 64 # Sequences per batch num_steps = 128 # Number of sequence steps per batch lstm_size = 512 # Size of hidden layers in LSTMs num_layers = 2 # Number of LSTM layers learning_rate = 0.001 # Learning rate keep_prob = 0.5 # Dropout keep probability model = CharRNN(len(vocab), batch_size=batch_size, num_steps=num_steps, lstm_size=lstm_size, num_layers=num_layers, learning_rate=learning_rate) epochs = 3 # Save every N iterations save_every_n = 200 saver = tf.train.Saver(max_to_keep=100) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) # Tensoboard train_writer = tf.summary.FileWriter('./logs/1/train', sess.graph) test_writer = tf.summary.FileWriter('./logs/1/test') # Use the line below to load a checkpoint and resume training #saver.restore(sess, 'checkpoints/______.ckpt') counter = 0 for e in range(epochs): # Train network new_state = sess.run(model.initial_state) loss = 0 for x, y in get_batches(encoded, batch_size, num_steps): counter += 1 start = time.time() feed = {model.inputs: x, model.targets: y, model.keep_prob: keep_prob, model.initial_state: new_state} merged = tf.summary.merge_all() # Tensorboard summary, batch_loss, new_state, _ = sess.run([merged, model.loss, model.final_state, model.optimizer], feed_dict=feed) train_writer.add_summary(summary, counter) end = time.time() print('Epoch: {}/{}... '.format(e+1, epochs), 'Training Step: {}... '.format(counter), 'Training loss: {:.4f}... 
'.format(batch_loss), '{:.4f} sec/batch'.format((end-start))) if (counter % save_every_n == 0): saver.save(sess, "checkpoints/i{}_l{}.ckpt".format(counter, lstm_size)) saver.save(sess, "checkpoints/i{}_l{}.ckpt".format(counter, lstm_size)) ``` #### Saved checkpoints Read up on saving and loading checkpoints here: https://www.tensorflow.org/programmers_guide/variables ``` tf.train.get_checkpoint_state('checkpoints') ``` ## Sampling Now that the network is trained, we'll can use it to generate new text. The idea is that we pass in a character, then the network will predict the next character. We can use the new one, to predict the next one. And we keep doing this to generate all new text. I also included some functionality to prime the network with some text by passing in a string and building up a state from that. The network gives us predictions for each character. To reduce noise and make things a little less random, I'm going to only choose a new character from the top N most likely characters. ``` def pick_top_n(preds, vocab_size, top_n=5): p = np.squeeze(preds) p[np.argsort(p)[:-top_n]] = 0 p = p / np.sum(p) c = np.random.choice(vocab_size, 1, p=p)[0] return c def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The "): samples = [c for c in prime] model = CharRNN(len(vocab), lstm_size=lstm_size, sampling=True) saver = tf.train.Saver() with tf.Session() as sess: saver.restore(sess, checkpoint) new_state = sess.run(model.initial_state) for c in prime: x = np.zeros((1, 1)) x[0,0] = vocab_to_int[c] feed = {model.inputs: x, model.keep_prob: 1., model.initial_state: new_state} preds, new_state = sess.run([model.prediction, model.final_state], feed_dict=feed) c = pick_top_n(preds, len(vocab)) samples.append(int_to_vocab[c]) for i in range(n_samples): x[0,0] = c feed = {model.inputs: x, model.keep_prob: 1., model.initial_state: new_state} preds, new_state = sess.run([model.prediction, model.final_state], feed_dict=feed) c = pick_top_n(preds, len(vocab)) samples.append(int_to_vocab[c]) return ''.join(samples) ``` Here, pass in the path to a checkpoint and sample from the network. ``` tf.train.latest_checkpoint('checkpoints') checkpoint = tf.train.latest_checkpoint('checkpoints') samp = sample(checkpoint, 2000, lstm_size, len(vocab), prime="Far") print(samp) checkpoint = 'checkpoints/i200_l512.ckpt' samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far") print(samp) checkpoint = 'checkpoints/i600_l512.ckpt' samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far") print(samp) checkpoint = 'checkpoints/i1200_l512.ckpt' samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far") print(samp) ```
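`pick_top_n` restricts sampling to the N most likely characters. A common alternative is temperature sampling, sketched below in plain NumPy as an addition to the notebook: dividing the log-probabilities by a temperature below 1 sharpens the distribution, while values above 1 flatten it. The function name and the example distribution are my own.

```
import numpy as np

def pick_with_temperature(preds, vocab_size, temperature=0.5):
    p = np.squeeze(preds).astype(np.float64)
    # Rescale log-probabilities by the temperature, then renormalize via softmax
    logits = np.log(p + 1e-10) / temperature
    p = np.exp(logits - np.max(logits))
    p = p / np.sum(p)
    return np.random.choice(vocab_size, p=p)

# Example with a made-up 5-character distribution
preds = np.array([[0.5, 0.2, 0.15, 0.1, 0.05]])
print(pick_with_temperature(preds, vocab_size=5, temperature=0.5))
```

Because it takes the same prediction array and vocabulary size and returns a character index, it could be dropped into `sample` in place of `pick_top_n`.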
# The Python ecosystem - The pandas library The [pandas library](https://pandas.pydata.org/) was created by [Wes McKinney](http://wesmckinney.com/) in 2010. pandas provides **data structures** and **functions** for manipulating, processing, cleaning and crunching data. In the Python ecosystem pandas is the state-of-the-art tool for working with tabular or spreadsheet-like data in which each column may be a different type (`string`, `numeric`, `date`, or otherwise). pandas provides sophisticated indexing functionality to make it easy to reshape, slice and dice, perform aggregations, and select subsets of data. pandas relies on other packages, such as [NumPy](http://www.numpy.org/) and [SciPy](https://scipy.org/scipylib/index.html). Further pandas integrates [matplotlib](https://matplotlib.org/) for plotting. If you are new to pandas we strongly recommend to visit the very well written [__pandas tutorials__](https://pandas.pydata.org/pandas-docs/stable/tutorials.html), which cover all relevant sections for new users to properly get started. Once installed (for details refer to the [documentation](https://pandas.pydata.org/pandas-docs/stable/install.html)), pandas is imported by using the canonical alias `pd`. ``` import pandas as pd ``` The pandas library has two workhorse data structures: __*Series*__ and __*DataFrame*__. * one dimensional `pd.Series` object * two dimensional `pd.DataFrame` object *** ## The `pd.Series` object Data generation ``` # import the random module from numpy from numpy import random # set seed for reproducibility random.seed(123) # generate 26 random integers between -10 and 10 my_data = random.randint(low=-10, high=10, size=26) # print the data my_data ``` A Series is a one-dimensional array-like object containing an array of data and an associated array of data labels, called its _index_. We create a `pd.Series` object by calling the `pd.Series()` function. ``` # Uncomment to look up the documentation # ?pd.Series # docstring # ??pd.Series # source # create a pd.Series object s = pd.Series(data=my_data) s type(s) ``` *** ### `pd.Series` attributes Python objects in general and the `pd.Series` in particular offer useful object-specific *attributes*. * _attribute_ $\to$ `OBJECT.attribute` $\qquad$ _Note that the attribute is called without parenthesis_ ``` s.dtypes s.index ``` We can use the `index` attribute to assign an index to a `pd.Series` object. Consider the letters of the alphabet.... ``` import string letters = string.ascii_uppercase letters ``` By providing an array-type object we assign a new index to the `pd.Series` object. ``` s.index = [l for l in letters] s.index s ``` *** ### `pd.Series` methods Methods are functions that are called using the attribute notation. Hence they are called by appending a dot (`.`) to the Python object, followed by the name of the method, parentheses `()` and in case one or more arguments (`arg`). * _method_ $\to$ `OBJECT.method_name(arg1, arg2, ...)` ``` s.sum() s.mean() s.max() s.min() s.median() s.quantile(q=0.5) s.quantile(q=[0.25, 0.5, 0.75]) ``` *** ### Element-wise arithmetic A very useful feature of `pd.Series` objects is that we may apply arithmetic operations *element-wise*. ``` s*0.1 #s+10 #10/s #s**2 #(2+s)*1**3 ``` *** ### Selection and Indexing Another main data operation is indexing and selecting particular subsets of the data object. pandas comes with a very [rich set of methods](https://pandas.pydata.org/pandas-docs/stable/indexing.html) for these type of tasks. 
In its simplest form we index a Series numpy-like, by using the `[]` operator to select a particular `index` of the Series. ``` s[3] s[2:6] s["C"] s["C":"K"] ``` *** ## The `pd.DataFrame` object The primary pandas data structure is the `DataFrame`. It is a two-dimensional size-mutable, potentially heterogeneous tabular data structure with both row and column labels. Arithmetic operations align on both row and column labels. Basically, the `DataFrame` can be thought of as a `dictionary`-like container for Series objects. **Generate a `DataFrame` object from scratch** pandas facilitates the import of different data types and sources, however, for the sake of this tutorial we generate a `DataFrame` object from scratch. Source: http://duelingdata.blogspot.de/2016/01/the-beatles.html ``` df = pd.DataFrame({"id" : range(1,5), "Name" : ["John", "Paul", "George", "Ringo"], "Last Name" : ["Lennon", "McCartney", "Harrison", "Star"], "dead" : [True, False, True, False], "year_born" : [1940, 1942, 1943, 1940], "no_of_songs" : [62, 58, 24, 3] }) df ``` *** ### `pd.DataFrame` attributes ``` df.dtypes # axis 0 df.columns # axis 1 df.index ``` *** ### `pd.DataFrame` methods **Get a quick overview of the data set** ``` df.info() df.describe() df.describe(include="all") ``` **Change index to the variable `id`** ``` df df.set_index("id") df ``` Note that nothing changed!! For the purpose of memory and computation efficiency `pandas` returns a view of the object, rather than a copy. Hence, if we want to make a permanent change we have to assign/reassign the object to a variable: df = df.set_index("id") or, some methods have the `inplace=True` argument: df.set_index("id", inplace=True) ``` df = df.set_index("id") df ``` **Arithmetic methods** ``` df.sum() df.sum(axis=1) ``` #### `groupby` method [Hadley Wickham 2011: The Split-Apply-Combine Strategy for Data Analysis, Journal of Statistical Software, 40(1)](https://www.jstatsoft.org/article/view/v040i01) <img src="./_img/split-apply-combine.svg" width="600"> Image source: [Jake VanderPlas 2016, Data Science Handbook](https://jakevdp.github.io/PythonDataScienceHandbook/) ``` df df.groupby("dead") df.groupby("dead").sum() df.groupby("dead")["no_of_songs"].sum() df.groupby("dead")["no_of_songs"].mean() df.groupby("dead")["no_of_songs"].agg(["mean", "max", "min"]) ``` #### Family of `apply`/`map` methods * `apply` works on a row (`axis=0`, default) / column (`axis=1`) basis of a `DataFrame` * `applymap` works __element-wise__ on a `DataFrame` * `map` works __element-wise__ on a `Series`. ``` df # (axis=0, default) df[["Last Name", "Name"]].apply(lambda x: x.sum()) # (axis=1) df[["Last Name", "Name"]].apply(lambda x: x.sum(), axis=1) ``` _... maybe a more useful case..._ ``` df.apply(lambda x: " ".join(x[["Name", "Last Name"]]), axis=1) ``` *** ### Selection and Indexing **Column index** ``` df["Name"] df[["Name", "Last Name"]] df.dead ``` **Row index** In addition to the `[]` operator pandas ships with other indexing operators such as `.loc[]` and `.iloc[]`, among others. * `.loc[]` is primarily __label based__, but may also be used with a boolean array. * `iloc[]` is primarily __integer position based__ (from 0 to length-1 of the axis), but may also be used with a boolean array. 
``` df.head(2) df.loc[1] df.iloc[1] ``` **Row and Columns indices** `df.loc[row, col]` ``` df.loc[1, "Last Name"] df.loc[2:4, ["Name", "dead"]] ``` **Logical indexing** ``` df df.no_of_songs > 50 df.loc[df.no_of_songs > 50] df.loc[(df.no_of_songs > 50) & (df.year_born >= 1942)] df.loc[(df.no_of_songs > 50) & (df.year_born >= 1942), ["Last Name", "Name"]] ``` *** ### Manipulating columns, rows and particular entries **Add a row to the data set** ``` from numpy import nan df.loc[5] = ["Mouse", "Mickey", nan, nan, 1928] df df.dtypes ``` _Note that the variable `dead` changed. Its values changed from `True`/`False` to `1.0`/`0.0`. Consequently its `dtype` changed from `bool` to `float64`._ **Add a column to the data set** ``` pd.datetime.today() now = pd.datetime.today().year now df["age"] = now - df.year_born df ``` **Change a particular entry** ``` df.loc[5, "Name"] = "Mini" df ``` *** ## Plotting The plotting functionality in pandas is built on top of matplotlib. It is quite convenient to start the visualization process with basic pandas plotting and to switch to matplotlib to customize the pandas visualization. ### `plot` method ``` # this call causes the figures to be plotted below the code cells % matplotlib inline df df[["no_of_songs", "age"]].plot() df["dead"].plot.hist() df["age"].plot.bar() ``` ## ...some notes on plotting with Python Plotting is an essential component of data analysis. However, the Python visualization world can be a frustrating place. There are many different options and choosing the right one is a challenge. (If you dare take a look at the [Python Visualization Landscape](https://github.com/rougier/python-visualization-landscape).) [matplotlib](https://matplotlib.org/) is probably the most well known 2D plotting Python library. It allows to produce publication quality figures in a variety of formats and interactive environments across platforms. However, matplotlib is the cause of frustration due to the complex syntax and due to existence of two interfaces, a __MATLAB like state-based interface__ and an __object-oriented interface__. Hence, __there is always more than one way to build a visualization__. Another source of confusion is that matplotlib is well integrated into other Python libraries, such as [pandas](http://pandas.pydata.org/index.html), [seaborn](http://seaborn.pydata.org/index.html), [xarray](http://xarray.pydata.org/en/stable/), among others. Hence, there is confusion as when to use pure matplotlib or a tool that is built on top of matplotlib. We import the `matplotlib` library and matplotlib's `pyplot` module using the canonical commands import matplotlib as mpl import matplotlib.pyplot as plt With respect to matplotlib terminology it is important to understand that the __`Figure`__ is the final image that may contain one or more axes, and that the __`Axes`__ represents an individual plot. To create a `Figure` object we call fig = plt.figure() However, a more convenient way to create a `Figure` object and an `Axes` object at once, is to call fig, ax = plt.subplots() Then we can use the `Axes` object to add data for ploting. ``` import matplotlib.pyplot as plt # create a Figure and Axes object fig, ax = plt.subplots(figsize=(10,5)) # plot the data and reference the Axes object df["age"].plot.bar(ax=ax) # add some customization to the Axes object ax.set_xticklabels(df.Name, rotation=0) ax.set_xlabel("") ax.set_ylabel("Age", size=14) ax.set_title("The Beatles and ... 
something else", size=18); ``` Note that we are only scratching the surface of the plotting capabilities with pandas. Refer to the pandas online documentation ([here](https://pandas.pydata.org/pandas-docs/stable/visualization.html)) for a comprehensive overview.
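To make the contrast between matplotlib's two interfaces mentioned above concrete, here is a minimal sketch (not part of the original notebook) that draws the same bar chart once with the state-based `pyplot` interface and once with the object-oriented interface; the small DataFrame is made up for illustration.

```
import pandas as pd
import matplotlib.pyplot as plt

df_demo = pd.DataFrame({"Name": ["John", "Paul", "George", "Ringo"],
                        "age": [40, 38, 37, 40]})

# State-based (MATLAB-like) interface: pyplot tracks the "current" figure
plt.figure(figsize=(6, 3))
plt.bar(df_demo["Name"], df_demo["age"])
plt.title("State-based interface")
plt.show()

# Object-oriented interface: hold explicit Figure and Axes objects
fig, ax = plt.subplots(figsize=(6, 3))
ax.bar(df_demo["Name"], df_demo["age"])
ax.set_title("Object-oriented interface")
plt.show()
```

The object-oriented form is what the cell above uses; holding the `Axes` object explicitly makes it easier to customize several subplots independently.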
# Run AwareDX ad-hoc on any drug and adverse event ``` from os import path from collections import Counter, defaultdict from tqdm.notebook import tqdm import numpy as np import pandas as pd import feather import scipy.stats from scipy import stats import pymysql import pymysql.cursors from database import Database from utils import Utils from drug import Drug u = Utils() db = Database('Mimir from Munnin') np.random.seed(u.RANDOM_STATE) def compile(results): results = results.dropna() results = results.reset_index() num_tests = results.shape[0] results.loc[:,'bonf_p_value'] = results.get('p_value') * num_tests #results = results.query('bonf_p_value<1') drug_adr_pairs = results.get(['drug','itr','adr']).groupby(by=['drug','adr']).count().query('itr==25').reset_index().get(['drug', 'adr']) scores = pd.DataFrame(columns=['drug', 'adr', 'p_val_min', 'p_val_med', 'p_val_max', 'logROR_avg','logROR_ci95_low', 'logROR_ci95_upp']).set_index(['drug','adr']) def mean_confidence_interval(data, confidence=0.95): a = 1.0 * np.array(data) n = len(a) m, se = np.mean(a), scipy.stats.sem(a) h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1) return m, m-h, m+h for _, (drug, adr) in tqdm(drug_adr_pairs.iterrows(), total=drug_adr_pairs.shape[0]): data = results.query('drug==@drug and adr==@adr') bonf_p = data['bonf_p_value'].values scores.at[(drug, adr), 'p_val_min'] = np.min(bonf_p) scores.at[(drug, adr), 'p_val_med'] = np.median(bonf_p) scores.at[(drug, adr), 'p_val_max'] = np.max(bonf_p) logROR = data['logROR'].values mean, lower, upper = mean_confidence_interval(logROR) scores.at[(drug, adr), 'logROR_avg'] = mean scores.at[(drug, adr), 'logROR_ci95_low'] = lower scores.at[(drug, adr), 'logROR_ci95_upp'] = upper scores = scores.reset_index() name_atc4, name_atc5, name_hlgt, name_soc, name_pt = defaultdict(str), defaultdict(str), defaultdict(str), defaultdict(str), defaultdict(str) for id_, name in db.run('select * from atc_4_name'): name_atc4[str(id_)] = name for id_, name in db.run('select * from atc_5_name'): name_atc5[str(id_)] = name for id_, name in db.run('select * from hlgt_name'): name_hlgt[id_] = name for id_, name in db.run('select * from soc_name'): name_soc[id_] = name for id_, name in db.run('select * from pt_name'): name_pt[id_] = name scores['drug_name'] = '' scores['drug_class'] = 0 scores = scores.set_index('drug') for id_ in np.unique(scores.index): if name_atc4[id_]: scores.at[id_, 'drug_name'] = name_atc4[id_] scores.at[id_, 'drug_class'] = 4 else: scores.at[id_, 'drug_name'] = name_atc5[id_] scores.at[id_, 'drug_class'] = 5 scores = scores.reset_index() scores['adr_name'] = '' scores['adr_class'] = '' scores = scores.set_index('adr') for id_ in np.unique(scores.index): if name_soc[id_]: scores.at[id_, 'adr_name'] = name_soc[id_] scores.at[id_, 'adr_class'] = 'soc' elif name_hlgt[id_]: scores.at[id_, 'adr_name'] = name_hlgt[id_] scores.at[id_, 'adr_class'] = 'hlgt' elif name_pt[id_]: scores.at[id_, 'adr_name'] = name_pt[id_] scores.at[id_, 'adr_class'] = 'pt' scores = scores.reset_index() return scores drug_name = input(' Enter ATC drug name: ') q_atc5 = "select atc_5_id from atc_5_name where atc_5_name=\'"+drug_name+"\'" q_atc4 = "select atc_4_id from atc_4_name where atc_4_name=\'"+drug_name+"\'" try: if db.get_list(q_atc5): drugID = db.get_list(q_atc5)[0] else: drugID = db.get_list(q_atc4)[0] except: raise NameError("drug not found") if not drugID: raise NameError("drug not found") adr_name = input(' Enter MedDRA outcome name: ') q = "select meddra_concept_id from pt_name where 
meddra_concept_name=\'"+adr_name+"\'" try: adrID = db.get_list(q) except: raise NameError("adr not found") if not adrID: raise NameError("adr not found") filename = 'Ad_Hoc/'+str(drugID)+'_'+str(adrID) print("Checking for {}".format(filename)) if path.exists(u.DATA_PATH+filename+'.feather'): results = u.load_df(filename) print("Found!") else: print("Not found, running ad-hoc") iterations=25 drug = Drug(drugID, adrID) for itr in tqdm(range(1, iterations+1)): drug.match() drug.count_adr() drug.assign_abcd(itr) drug.do_chi_square() drug.calc_logROR() drug.reset_for_next_itr() assert drug.ensure_results(itr) results = compile(drug.results) u.save_df(results, filename) u.print_table(results) results ```
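The `compile` step above applies a Bonferroni correction and summarizes the matching iterations with a 95% t-interval via `mean_confidence_interval`. The snippet below is a self-contained sketch of those two calculations on synthetic numbers (it does not touch the project's `Database` or `Utils` classes), mainly to make the statistics easier to follow; the sample values are invented.

```
import numpy as np
import scipy.stats

def mean_confidence_interval(data, confidence=0.95):
    a = 1.0 * np.array(data)
    n = len(a)
    m, se = np.mean(a), scipy.stats.sem(a)
    h = se * scipy.stats.t.ppf((1 + confidence) / 2.0, n - 1)
    return m, m - h, m + h

# Synthetic logROR values, as if collected over 25 matching iterations
np.random.seed(0)
log_ror = np.random.normal(loc=0.4, scale=0.1, size=25)
print("logROR mean and 95% CI:", mean_confidence_interval(log_ror))

# Bonferroni correction: multiply each p-value by the number of tests
# (adjusted values above 1 are usually reported as 1)
p_values = np.array([0.0004, 0.01, 0.03])
num_tests = 5000
print("Bonferroni-adjusted:", p_values * num_tests)
```

This mirrors what `compile` does per drug-adverse-event pair before summarizing the minimum, median, and maximum adjusted p-values across iterations.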
github_jupyter
[View in Colaboratory](https://colab.research.google.com/github/nishi1612/SC374-Computational-and-Numerical-Methods/blob/master/Set_3.ipynb) Set 3 --- **Finding roots of polynomial by bisection method** ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import math from google.colab import files def iterations(n, arr , i): plt.plot(range(n),arr) plt.xlabel('No. of iterations') plt.ylabel('Value of c') plt.grid(True) plt.savefig("Iterations" + str(i) + ".png") files.download("Iterations" + str(i) + ".png") plt.show() def graph(i): plt.xlabel('x') plt.ylabel('y') plt.grid(True) plt.legend(loc='upper right') plt.savefig("Graph" + str(i) + ".png") files.download("Graph" + str(i) + ".png") plt.show() def bissection( a,b,epsilon,k): table = pd.DataFrame(columns=['a','b','c','b-c','f(a)*f(c)','Assign']) c = (a+b)/2; dist = b-c; i = 0 arr = [] while(dist>epsilon): ans_a = func(a,k); ans_b = func(b,k); ans_c = func(c,k); ans = "" if(ans_a*ans_c < 0): b=c; ans = "b=c" else: a=c; ans = "a=c"; table.loc[i] = [a,b,c,dist,ans_a*ans_c,ans] arr.append(c) i = i+1 c = (a+b) / 2 dist = b-c return (a+b)/2 ,i , arr , table; def func(x,k): if k==1: return x**6 - x - 1; elif k==2: return x**3 - x**2 - x - 1; elif k==3: return x - 1 - 0.3*math.cos(x); elif k==4: return 0.5 + math.sin(x) - math.cos(x); elif k==5: return x - math.e**(-x); elif k==6: return math.e**(-x) - math.sin(x); elif k==7: return x**3 - 2*x - 2; elif k==8: return x**4 - x - 1; elif k==9: return math.e**(x) - x - 2; elif k==10: return 1- x + math.sin(x); elif k==11: return x - math.tan(x); x = np.arange(-2,3,0.001) plt.plot(x,x**6,label='$x^6$') plt.plot(x,x+1,label="x+1") graph(1) plt.plot(x**6-x-1,label='$x^6$ - x - 1') graph(1) a , n , arr , table = bissection(1,2,0.001,1) iterations(n,arr,1) print(str(a) + "\n" + str(func(a,1))) table b , n , arr , table = bissection(-1,0,0.001,1) iterations(n,arr,1) print(str(b) + "\n" + str(func(b,1))) table x = np.arange(-2,3,0.001) plt.plot(x,x**3,label='$x^3$') plt.plot(x,x**2 + x + 1,label='$x^2 + x + 1$') graph(2) plt.plot(x**3 - (x**2 + x + 1),label='$x^3 - x^2 - x - 1$') graph(2) a , n , arr, table = bissection(1,2,0.0001,2) iterations(n,arr,2) print(str(a) + "\n" + str(func(a,2))) table x = np.arange(-3,5,0.001) plt.plot(x,x-1,label='$x-1$') plt.plot(x,0.3*np.cos(x),label='$0.3cos(x)$') graph(3) plt.plot(x,x-1-0.3*np.cos(x) , label='$x - 1 - 0.3cos(x)$') graph(3) a , n , arr , table = bissection(0,2,0.0001,3) iterations(n,arr,3) print(str(a) + "\n" + str(func(a,3))) table x = np.arange(-10,10,0.001) plt.plot(x,0.5 + np.sin(x),label='$0.5 + sin(x)$') plt.plot(x,np.cos(x),label='$cos(x)$') graph(4) plt.plot(x,0.5 + np.sin(x) - np.cos(x),label='$0.5 + sin(x) - cos(x)$') graph(4) a , n , arr , table = bissection(0,2,0.0001,4) iterations(n,arr,4) print(str(a) + "\n" + str(func(a,4))) table x = np.arange(-0,5,0.001) plt.plot(x,x,label='$x$') plt.plot(x,np.e**(-x),label='$e^{-x}$') graph(5) plt.plot(x,x - np.e**(-x),label='$x - e^{-x}$') graph(5) a , n , arr , table = bissection(0,1,0.0001,5) iterations(n,arr,5) print(str(a) + "\n" + str(func(a,5))) table x = np.arange(0,5,0.001) plt.plot(x,np.sin(x),label='$sin(x)$') plt.plot(x,np.e**(-x),label='$e^{-x}$') graph(6) plt.plot(x,np.sin(x) - np.e**(-x),label='$sin(x) - e^{-x}$') graph(6) a , n , arr , table = bissection(0,1,0.0001,6) iterations(n,arr,6) print(str(a) + "\n" + str(func(a,6))) table a , n , arr , table = bissection(3,4,0.0001,6) iterations(n,arr,6) print(str(a) + "\n" + str(func(a,6))) table x = np.arange(-2,4,0.001) 
plt.plot(x,x**3,label='$x^3$') plt.plot(x,2*x+2,label='$2x + 2$') graph(7) plt.plot(x,x**3 - 2*x - 2,label='$x^3 - 2x - 2$') graph(7) a , n , arr , table = bissection(1,2,0.0001,7) iterations(n,arr,7) print(str(a) + "\n" + str(func(a,7))) table x = np.arange(-2,4,0.001) plt.plot(x,x**4,label='$x^4$') plt.plot(x,x+1,label='$x+1$') graph(8) plt.plot(x,x**4 - x - 1,label='$x^4 - x - 1$') graph(8) a , n , arr , table = bissection(-1,0,0.0001,8) iterations(n,arr,8) print(str(a) + "\n" + str(func(a,8))) table a , n , arr , table = bissection(1,2,0.0001,8) iterations(n,arr,8) print(str(a) + "\n" + str(func(a,8))) table x = np.arange(-5,4,0.001) plt.plot(x,np.e**(x),label='$e^x$') plt.plot(x,x+2,label='$x+2$') graph(9) plt.plot(x,np.e**(x) - x - 2,label='$e^2 - x - 2$') graph(9) a , n , arr , table = bissection(1,2,0.0001,9) iterations(n,arr,9) print(str(a) + "\n" + str(func(a,9))) table x = np.arange(-5,4,0.001) plt.plot(x,-np.sin(x),label='$-sin(x)$') plt.plot(x,1-x,label='$1 - x$') graph(10) plt.plot(x,-np.sin(x) - 1 + x,label='$-sin(x) - 1 + x$') graph(10) a , n , arr , table = bissection(0,2,0.0001,10) iterations(n,arr,10) print(str(a) + "\n" + str(func(a,10))) table x = np.arange(-10,10,.001) plt.plot(np.tan(x),label='$tan(x)$') plt.plot(x,label='$x$') graph(11) plt.plot(np.tan(x) - x,label='$x - tan(x)$') graph(11) a , n , arr , table = bissection(4,5,0.0001,11) iterations(n,arr,11) print(str(a) + "\n" + str(func(a,11))) table a , n , arr , table = bissection(80,120,0.0001,11) iterations(n,arr,11) print(str(a) + "\n" + str(func(a,11))) table ```
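An optional sanity check (my addition; it assumes SciPy is available, as it is on Colab): the bisection root of x^6 - x - 1 on [1, 2] can be compared against SciPy's Brent solver, and the two should agree to within the chosen epsilon.

```
from scipy.optimize import brentq

# root of f(x) = x^6 - x - 1 on [1, 2] by both methods
root_bisect, n, arr, table = bissection(1, 2, 0.0001, 1)
root_brentq = brentq(lambda x: func(x, 1), 1, 2)
print(root_bisect, root_brentq, abs(root_bisect - root_brentq))
```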
github_jupyter
# Compiling and running C programs As in [the example](https://github.com/tweag/funflow/tree/v1.5.0/funflow-examples/compile-and-run-c-files) in funflow version 1, we can construct a `Flow` which compiles and executes a C program. As in the older versions of this example, we will use the `gcc` Docker image to run our compilation step. ``` :opt no-lint {-# LANGUAGE Arrows #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE QuasiQuotes #-} -- Funflow libraries import qualified Data.CAS.ContentStore as CS import Funflow ( Flow, dockerFlow, ioFlow, getDirFlow, pureFlow, putDirFlow, runFlow, ) import qualified Funflow.Tasks.Docker as DE -- Other libraries import Path (toFilePath, Abs, Dir, Path, File, absdir, parseAbsDir, relfile, reldir, (</>)) import System.Directory (getCurrentDirectory) import System.Process (runCommand, ProcessHandle) ``` Similar to in Funflow version 1.x, inputs to Docker tasks are mounted in from the content store. This means that we need to copy our example c files to the content store before we can compile them: ``` -- | Helper for getting the absolute path to the src directory srcDir :: () -> IO (Path Abs Dir) srcDir _ = do cwd <- getCurrentDirectory cwdAbs <- parseAbsDir cwd return $ cwdAbs </> [reldir|./src|] -- | A `Flow` which copies the c sources to the content store copyExampleToStore :: Flow () CS.Item copyExampleToStore = proc _ -> do exampleDir <- ioFlow srcDir -< () putDirFlow -< exampleDir ``` Now we can define a task which compiles the example C files using `gcc`: ``` config :: DE.DockerTaskConfig config = DE.DockerTaskConfig { DE.image = "gcc:9.3.0", DE.command = "gcc", DE.args = [ "/example/double.c", "/example/square.c", "/example/main.c"] } -- | Compile our C program and get the path to the output executable compile :: Flow CS.Item CS.Item compile = proc exampleItem -> do -- Define a volume for the example directory let exampleVolume = DE.VolumeBinding {DE.item = exampleItem, DE.mount = [absdir|/example/|]} dockerFlow config -< DE.DockerTaskInput {DE.inputBindings = [exampleVolume], DE.argsVals = mempty} ``` And finally, we can construct our full Flow graph and execute it! ``` flow :: Flow Integer ProcessHandle flow = proc input -> do -- 1. Add the example to the content store example <- copyExampleToStore -< () -- 2. Compile the C sources and get the path to the new executable output <- compile -< example outputDir <- getDirFlow -< output exe <- pureFlow (\x -> toFilePath (x </> [relfile|a.out|])) -< outputDir -- 3. Call the executable command <- pureFlow (\(c, n) -> c <> " " <> show n) -< (exe, input) ioFlow runCommand -< command -- Our C program defined in `src/main.c` defines a function f(x) = 2*x + x^2 -- For input 3 this should output 15. runFlow flow 3 :: IO ProcessHandle ```
github_jupyter
## Basic Training UC Berkeley Python Bootcamp ``` print("Hello, world.") ``` # Calculator # > there are `int` and `float` (but not doubles) ``` print(2 + 2) 2 + 2 print(2.1 + 2) 2.1 + 2 == 4.0999999999999996 %run talktools ``` - Python stores floats as their byte representation so is limited by the same 16-bit precision issues as most other languages - In doing calculations, unless you specify otherwise, Python will store the results in the smallest-byte representation > 1. Indentation matters! > 2. When you mess up, Python is gentle > 3. \# starts a comments (until the end of the line) ``` print(2 + 2) 2 + 2 2 # this is a comment and is not printed # this is also a comment ``` &nbsp; ** Calculator ** - In Python 3, there is no distinction between `int` and `long` ``` 42**42 (42**42).bit_length() bin(42**42) ``` Division always leads to a float ``` 2 / 2 2 / 2.0 ``` Note: This is an important difference between Python 2 and Python 3. Old-style division between `int`s can be done with a double slash `\\` ``` 2 // 2 3 // 2 2.5 // 2 # egad, dont do this. ``` There is also `complex` types ``` complex(1,2) 1+2j 1 + 2j - 2j ``` Note: Access to [`decimal`](https://docs.python.org/3/library/decimal.html#module-decimal) (decimal fixed point and floating point arithmetic) and [`fraction`](https://docs.python.org/3/library/fractions.html#module-fractions) types/operations is through built-in `modules`. &nbsp; Let's do some math ``` (3.0*10.0 - 25.0)/5.0 print(3.085e18*1e6) # this is a Megaparsec in units of cm! t = 1.0 # declare a variable t (time) accel = 9.8 # acceleration in units of m/s^2 # distance travelled in time t seconds is 1/2 a*t**2 dist = 0.5*accel*t*t print(dist) # this is the distance in meters dist1 = accel*(t**2)/2 print(dist1) dist2 = 0.5*accel*pow(t,2) print(dist2) ``` - **variables** are assigned on the fly - multiplication, division, exponents as you expect ``` print(6 / 5) ; print(9 / 5) print(6 // 5) ; print(9 // 5) # remember double-slash integer division returns the floor 6 % 5 # mod operator 1 << 2 ## shift: move the number 1 by two bits to the left ## that is make a new number 100 (base 2) 5 >> 1 ## shift: move the number 5 = 101 (base 2) one to ## to the right (10 = 2) x = 2 ; y = 3 ## assign two variables on the same line! x | y ## bitwise OR x ^ y ## exclusive OR (10 ^ 11 = 01) x & y ## bitwise AND x = x ^ y ; print(x) x += 3 ; print(x) x /= 2.0 ; print(x) ``` we'll see a lot more mathy operators and functions later ## Relationships ## ``` # from before dist1 = 4.9 and dist = 4.9 dist1 == dist dist < 10 dist <= 4.9 dist < (10 + 2j) dist < -2.0 dist != 3.1415 ``` &nbsp; ** More on Variables & Types ** ``` 0 == False not False 0.0 == False not (10.0 - 10.0) not -1 not 3.1415 x = None # None is something special. 
Not true or false None == False None == True False or True False and True float("nan") == True ``` &nbsp; ** More on Variables & Types ** ``` print(type(1)) x = 2 ; type(x) type(2) == type(1) print(type(True)) print(type(type(1))) print(type(pow)) ``` &nbsp; we can test whether something is a certain type with **`isinstance()`** ``` isinstance(1,int) isinstance(1,(int,float)) isinstance("spam",str) isinstance(1.212,int) isinstance(1.212,int) ``` We'll see later than numbers are an instance of an object, which have methods that can act upon itself: ``` (1.212).is_integer() (1.0).is_integer() ``` builtin-types: **`int`**, **`bool`**, **`str`**, **`float`**, **`complex`** # Strings Strings are a sequence of characters - they can be indexed and sliced up as if they were an array - you can glue strings together with + signs Strings are **immutable** (unlike in C), so you cannot change a string in place (this isn't so bad...) Strings can be formatted and compared ``` >>> x = "spam" ; print(type(x)) print("hello!\n...my sire.") "hello!\n...my sire." "wah?!" == 'wah?!' print("'wah?!' said the student") print("\"wah?!\" said the student") ``` backslashes (\\) start special (escape) characters: ``` \n = newline (\r = return) \t = tab \a = bell ``` string literals are defined with double quotes or quotes. The outermost quote type cannot be used inside the string (unless it's escaped with a backslash) See: http://docs.python.org/reference/lexical_analysis.html#string-literals ``` print("\a\a\a") # raw strings don't escape characters print(r'This is a raw string...newlines \r\n are ignored.') # Triple quotes are real useful for multiple line strings y = '''For score and seven minutes ago, you folks all learned some basic mathy stuff with Python and boy were you blown away!''' print(y) ``` - prepending ``r`` makes that string "raw" - triple quotes allow you to compose long strings https://docs.python.org/3.4/reference/lexical_analysis.html#literals ``` print("\N{RIGHT CURLY BRACKET}") print("\N{BLACK HEART SUIT}") ``` http://www.fileformat.info/info/unicode/char/search.htm ``` s = "spam" ; e = "eggs" print(s + e) print("spam" "eggs" "Trumpkins") print(s "eggs") print(s + " and " + e) print(s,"and",e, sep=" ") print("green " + e + " and\n " + s + "\n\t ... and Trumpkins") print(s*3 + e) print(s*3,e,sep="->") print("*"*50) print("spam" == "good") ; print("spam" == "spam") "spam" < "zoo" "s" < "spam" ``` - you can concatenate strings with ``+`` sign - you can do multiple concatenations with the ``*`` sign - strings can be compared ``` print('I want' + 3 + ' eggs and no ' + s) print('I want ' + str(3) + ' eggs and no ' + s) pi = 3.14159 print('I want ' + str(pi) + ' eggs and no ' + s) print(str(True) + ":" + ' I want ' + str(pi) + ' eggs and no ' + s) ``` you must concatenate only strings, coercing ("casting") other variable types to `str` there's a cleaner way to do this, with string formatting. we'll see that tomorrow. 
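A quick preview of that cleaner way, using the same variables as above (string formatting itself is tomorrow's topic):

```
print("I want " + str(pi) + " eggs and no " + s)  # casting by hand, as above
print("I want {} eggs and no {}".format(pi, s))   # the formatted version does the casting for you
```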
### Getting input from the user: always a string response ``` faren = input("Enter the temperature (in Fahrenheit): ") cent = (5.0/9.0)*(faren - 32.0) faren = float(faren) cent = (5.0/9.0)*(faren - 32.0) ; print(cent) faren = float(input("Enter the temperature (in Fahrenheit): ")) print((5.0/9.0)*(faren - 32.0)) ``` &nbsp; #### We can think of strings as arrays (although, unlike in C you never really need to deal with directly addressing character locations in memory) ``` s ="spam" len(s) len("eggs\n") len("") s[0] s[-1] ``` - ``len()`` gives us the length of an array - strings are zero indexed - can also count backwards We can think of strings as arrays (although, unlike in C you never really need to deal with directly addressing character locations in memory) <img src="https://raw.github.com/profjsb/python-bootcamp/master/Lectures/01_BasicTraining/spam.png"> useful for slicing: indices are between the characters <img src="https://raw.github.com/profjsb/python-bootcamp/master/Lectures/01_BasicTraining/spam.png"> ``` s[0:1] # get every character between 0 and 1 s[1:4] # get every character between 1 and 4 s[-2:-1] ## slicing [m:n] will return abs(n-m) characters s[0:100] # if the index is beyond the len(str), you dont segfault! s[1:] # python runs the index to the end s[:2] # python runs the index to the beginning s[::-1] # print it out backwards ``` s = s[:n] + s[n:] for all n ## Basic Control (Flow) Python has pretty much all of what you use: if...elif...else, for, while As well as: break, continue (within loops) Does not have: case (explicitly), goto Does have: `pass` ### Flow is done within blocks (where indentation matters) ``` x = 1 if x > 0: print("yo") else: print("dude") ``` Note: if you are doing this within the Python interpreter you'll see the ... ``` >>> x = 1 >>> if x > 0: ... print "yo" ... else: ... print "dude" ... yo ``` Note colons & indentations (tabbed or spaced) ``` x = 1 if x > 0: print("yo") else: print("dude") ``` Indentations with the same block must be the same but not within different blocks (though this is ugly) one-liners ``` print("yo" if x > 0 else "dude") ``` a small program... Do Control-C to stop (in Python/IPython) or "Kernel->Interrupt" in IPython notebook ``` x = 1 y = 0 while True: print("yo" if x > 0 else "dude") x *= -1 y += 1 if y > 42: break ``` case statements can be constructed with just a bunch of if, elif,...else ``` if x < 1: print("t") elif x > 100: print("yo") else: print("dude") ``` ordering matters. The first block of `True` in an if/elif gets executed then everything else does not. blocks cannot be empty ``` x = "fried goldfish" if x == "spam for dinner": print("I will destroy the universe") else: # I'm fine with that. I'll do nothing ``` `pass` is a "do nothing" statement ``` if x == "spam for dinner": print("I will destroy the universe") else: # I'm fine with that. I'll do nothing pass ``` The double percent sign at the top of an IPython/Jupyter cell is a cell-level "magic". It's not Python itself, but defined as part of IPython/Jupyter. We'll see more on this later in the bootcamp. ``` %%file temp1.py # set some initial variables. 
Set the initial temperature low faren = -1000 # we dont want this going on forever, let's make sure we cannot have too many attempts max_attempts = 6 attempt = 0 while faren < 100: # let's get the user to tell us what temperature it is newfaren = float(input("Enter the temperature (in Fahrenheit): ")) if newfaren > faren: print("It's getting hotter") elif newfaren < faren: print("It's getting cooler") else: # nothing has changed, just continue in the loop continue faren = newfaren # now set the current temp to the new temp just entered attempt += 1 # bump up the attempt number if attempt >= max_attempts: # we have to bail out break if attempt >= max_attempts: # we bailed out because of too many attempts print("Too many attempts at raising the temperature.") else: # we got here because it's hot print("it's hot here, people.") %run temp1 %run temp1 %%file temp2.py # set some initial variables. Set the initial temperature low faren = -1000 # we dont want this going on forever, let's make sure we cannot have too many attempts max_attempts = 6 attempt = 0 while faren < 100 and (attempt < max_attempts): # let's get the user to tell us what temperature it is newfaren = float(input("Enter the temperature (in Fahrenheit): ")) if newfaren > faren: print("It's getting hotter") elif newfaren < faren: print("It's getting cooler") else: # nothing has changed, just continue in the loop continue faren = newfaren # now set the current temp to the new temp just entered attempt += 1 # bump up the attempt number if attempt >= max_attempts: # we bailed out because of too many attempts print("Too many attempts at raising the temperature.") else: # we got here because it's hot print("it's hot here, people.") ``` UC Berkeley Python Bootcamp - Basic Training (c) J. Bloom 2008-2016 All Rights Reserved
github_jupyter
# 1millionwomentotech SummerOfCode ## Intro to AI: Week 4 Day 3 ``` print(baby_train[50000]['reviewText']) from nltk.sentiment.vader import SentimentIntensityAnalyzer sia = SentimentIntensityAnalyzer() text = baby_train[50000]['reviewText'] for s in sent_tokenize(text): print(s) print(sia.polarity_scores(s)) def sia_features(dataset): """For each review text in the dataset, extract: (1) the mean positive sentiment over all sentences (2) the mean neutral sentiment over all sentences (3) the mean negative sentiment over all sentences (4) the maximum positive sentiment over all sentences (5) the maximum neutral sentiment over all sentences (6) the maximum negative sentiment over all sentences""" feat_matrix = numpy.empty((len(dataset), 6)) for i in range(len(dataset)): sentences = sent_tokenize(dataset[i]['reviewText']) nsent = len(sentences) if nsent: sentence_polarities = numpy.empty((nsent, 3)) for j in range(nsent): polarity = sia.polarity_scores(sentences[j]) sentence_polarities[j, 0] = polarity['pos'] sentence_polarities[j, 1] = polarity['neu'] sentence_polarities[j, 2] = polarity['neg'] feat_matrix[i, 0:3] = numpy.mean(sentence_polarities, axis=0) # mean over the columns feat_matrix[i, 3:6] = numpy.max(sentence_polarities, axis=0) # maximum over the columns else: feat_matrix[i, 0:6] = 0.0 return feat_matrix sia_tr = sia_features(baby_train) testmat = numpy.arange(12.).reshape((3, 4)) print(testmat) print(numpy.max(testmat, axis=0)) print(numpy.mean(testmat, axis=1)) def len_features(dataset): """Add two features: (1) length of review (in thousands of characters) - truncate at 2,500 (2) percentage of exclamation marks (in %)""" feat_matrix = numpy.empty((len(dataset), 2)) for i in range(len(dataset)): text = dataset[i]['reviewText'] feat_matrix[i, 0] = len(text) / 1000. if text: feat_matrix[i, 1] = 100. 
* text.count('!') / len(text) else: feat_matrix[i, 1] = 0.0 feat_matrix[feat_matrix>2.5] = 2.5 return feat_matrix len_tr = len_features(baby_train) print(X_train_neg.shape, sia_tr.shape, len_tr.shape) X_train_augmented = numpy.concatenate((X_train_neg, sia_tr, len_tr), axis=1) # stack horizontally lreg_augmented = LinearRegression().fit(X_train_augmented, Y_train) pred_train_augmented = lreg_augmented.predict(X_train_augmented) mae_train_augmented = mean_absolute_error(pred_train_augmented, Y_train) print("Now the mean absolute error on the training data is %f stars" % mae_train_augmented) rf_augmented = RandomForestRegressor().fit(X_train_augmented, Y_train) rfpred_train_augmented = rf_augmented.predict(X_train_augmented) mae_train_rf_augmented = mean_absolute_error(rfpred_train_augmented, Y_train) print("For the RF, it is %f stars" % mae_train_rf_augmented) X_valid_neg = dataset_to_matrix_with_neg(baby_valid) sia_valid = sia_features(baby_valid) len_valid = len_features(baby_valid) X_valid_augmented = numpy.concatenate((X_valid_neg, sia_valid, len_valid), axis=1) pred_valid_augmented = lreg_augmented.predict(X_valid_augmented) pred_valid_rf_augmented = rf_augmented.predict(X_valid_augmented) mae_valid_augmented = mean_absolute_error(pred_valid_augmented, Y_valid) print("On the validation set, we get %f error for the linear regression" % mae_valid_augmented) mae_valid_rf_augmented = mean_absolute_error(pred_valid_rf_augmented, Y_valid) print("And %f for the random forest regression" % mae_valid_rf_augmented) print(baby_train[50000]['reviewText']) from nltk.sentiment.vader import SentimentIntensityAnalyzer sia = SentimentIntensityAnalyzer() text = baby_train[50000]['reviewText'] for s in sent_tokenize(text): print(s) print(sia.polarity_scores(s)) def sia_features(dataset): """For each review text in the dataset, extract: (1) mean positive sentiment over all sentences (2) mean neutral sentiment over all sentences (3) mean negative sentiment over all sentences (4) maximum positive sentiment over all sentences (5) maximum neutral sentiment over all sentences (6) maximum negative sentiment over all sentences """ feat_matrix = numpy.empty((len(dataset), 6)) for i in range(len(dataset)): sentences = sent_tokenize(dataset[i]['reviewText']) nsent = len(sentences) if nsent: sentence_polarities = numpy.empty((nsent, 3)) for j in range(nsent): polarity = sia.polarity_scores(sentences[j]) sentence_polarities[j, 0] = polarity['pos'] sentence_polarities[j, 1] = polarity['neu'] sentence_polarities[j, 2] = polarity['neg'] feat_matrix[i, 0:3] = numpy.mean(sentence_polarities, axis = 0) # mean over the columns feat_matrix[i, 3:6] = numpy.max(sentence_polarities, axis = 0) # maximum over the columns else: feat_matrix[i, 0:6] = 0.0 return feat_matrix sia_tr = sia_features(baby_train) print(sia_tr[:10]) testmat = numpy.arange(12.).reshape((3,4)) print(testmat) print(numpy.max(testmat, axis = 0)) print(numpy.mean(testmat, axis = 1)) # Homework - required for Certification def len_features(dataset): """Add two features: (1) length of review (in thousands of character) - truncate at 2,500 (2) percentage of exclamation marks (in %) """ len_tr = len_features(baby_train) print(X_train_neg.shape, sia_tr.shape) # stack horizontally X_train_augmented = numpy.concatenate( (X_train_neg, sia_tr), axis = 1) lreg_augmented = LinearRegression().fit(X_train_augmented, Y_train) pred_train_augmented = lreg_augmented.predict(X_train_augmented) mae_train_augmented = mean_absolute_error(pred_train_augmented, Y_train) print("Now 
the mean absolute error on the training data is %f starts" % mae_train_augmented) # random forest rf_augmented = RandomForestRegressor().fit(X_train_augmented, Y_train) rfpred_train_augmented = rf_augmented.predict(X_train_augmented) mae_train_rf_augmented = mean_absolute_error(rfpred_train_augmented, Y_train) print("For the RF, MAE is %f stars" % mae_train_rf_augmented) X_valid_neg = dataset_to_matrix_with_neg(baby_valid) sia_valid = sia_features(baby_valid) # len_valid = X_valid_augmented = numpy.concatenate((X_valid_neg, sia_valid), axis = 1) pred_valid_augmented = pred_valid_rfaugmented = mae_valid_augmented = mae_valid_rfaugmented = ``` # Homework for certification Refactor the code above: - "Be lazy. Not just lazy but proactively, agressively lazy." Remove duplication. - create a single function that takes in data and spits out all success metrics across all of your algos. # Where to go from here? - unigrams (NLTK) - word vector (gensim, [glove](https://nlp.stanford.edu/projects/glove/), word2vec) - recurrent neural net - convolutional neural net https://www.oreilly.com/learning/perform-sentiment-analysis-with-lstms-using-tensorflow http://www.wildml.com/2015/11/understanding-convolutional-neural-networks-for-nlp/ https://machinelearningmastery.com/develop-n-gram-multichannel-convolutional-neural-network-sentiment-analysis/
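One possible shape for the certification homework above, as a sketch only: the names `build_features` and `evaluate_models` are mine, while the feature extractors, models, and metric are the ones already defined in this notebook (it assumes `len_features` from the earlier cell has been run).

```
def build_features(dataset):
    """Stack the word-count, sentiment, and length features into one matrix."""
    return numpy.concatenate((dataset_to_matrix_with_neg(dataset),
                              sia_features(dataset),
                              len_features(dataset)), axis=1)

def evaluate_models(train_set, Y_train, valid_set, Y_valid):
    """Fit each regressor once and report train/validation MAE for all of them."""
    X_tr, X_va = build_features(train_set), build_features(valid_set)
    scores = {}
    for name, model_class in [('linear', LinearRegression), ('random_forest', RandomForestRegressor)]:
        model = model_class().fit(X_tr, Y_train)
        scores[name] = {'train_mae': mean_absolute_error(model.predict(X_tr), Y_train),
                        'valid_mae': mean_absolute_error(model.predict(X_va), Y_valid)}
    return scores

evaluate_models(baby_train, Y_train, baby_valid, Y_valid)
```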
github_jupyter
# 自动求导的相关设置 - Tensor的属性: - requires_grad=True - 是否用来求导 - is_leaf: - 叶子节点必须是计算的结果; - 用户创建的Tensor的is_leaf=True(尽管requires_grad=True,也is_leaf=True); - requires_grad=False的Tensor的is_leaf=True; - grad_fn: - 用来指定求导函数; - grad - 用来返回导数; - dtype - 只有torch.float的张量才能求导; 1. 求导的例子 ``` import torch # x自变量 x = torch.Tensor([5]) x.requires_grad=True # y因变量 y = x ** 2 # 求导 y.backward() # 导数的结果 print(x.grad) ``` 2. 求导的可视化(导数函数的曲线) ``` %matplotlib inline import matplotlib.pyplot as plt import torch # x自变量 x = torch.linspace(0, 10, 100) x.requires_grad=True # y因变量 y = (x - 5) ** 2 + 3 z = y.sum() # 求导 z.backward() print() # 可视化 plt.plot(x.detach(), y.detach(), color=(1, 0, 0, 1), label='$y=(x-5)^2 + 3$') plt.plot(x.detach(), x.grad.detach(), color=(1, 0, 1, 1), label='$y=2(x-5)$') plt.legend() plt.show() # print(x.grad) # print(x) ``` 3. 求导相关的属性值 ``` import torch # x自变量 x = torch.Tensor([5]) x.requires_grad=True # 求导前的属性 print("-------------求导前x") print("leaf:", x.is_leaf) print("grad_fn:", x.grad_fn) print("grad:", x.grad) # y因变量 y = x ** 2 print("-------------求导前y") print("requires_grad:", y.requires_grad) print("leaf:", y.is_leaf) print("grad_fn:", y.grad_fn) print("grad:", y.grad) # 求导 y.backward() # 只对标量运算 print("-------------求导后x") # 求导后的属性 print("leaf:", x.is_leaf) print("grad_fn:", x.grad_fn) print("grad:", x.grad) print("-------------求导后y") print("requires_grad:", y.requires_grad) print("leaf:", y.is_leaf) print("grad_fn:", y.grad_fn) print("grad:", y.grad) ``` # Tensor的backward函数 ## backward函数定义 - 函数定义: ```python backward(self, gradient=None, retain_graph=None, create_graph=False) ``` - 参数说明: - gradient=None:需要求导的微分张量; - retain_graph=None:保留图;否则每次计算完毕,床创建的图都会被释放。 - create_graph=False:创建导数图,主要用来求高阶导数; ## 求导的通用模式 - 函数表达式: - $z = 2x + 3y$ - 手工求导: - $\dfrac{\partial{z}}{\partial{x}} = 2$ ``` import torch x = torch.Tensor([1, 2, 3]) x.requires_grad=True # 这个属性必须在 z = 2*x + 3*y 表达式构建图的时候设置 y = torch.Tensor([4, 5, 6]) z = 2*x + 3*y z.backward(x) # 对x求导,得到的结果,自然是 2,但是x的grad是 2 * x print(x.grad, y.grad, z.grad) # 没有对y求导,所以对y没有要求 ``` ## 理解导数 - 函数表达式: - $z = x^2$ - 手工求导: - $\dfrac{\partial{z}}{\partial{x}} = 2x$ - $\color{red}{上面过程怎么计算的呢?}$ ### 结果张量为标量的情况 - 如果z是标量,则直接计算导数:$\dfrac{\partial{z}}{\partial{x}} = 2x$ ``` import torch x = torch.Tensor([2]) x.requires_grad=True z = x**2 # 求导函数 z.backward() # 对x求导,2 * x ,导数为2x=4 print(x.grad, z.grad) ``` ### 结果张量为向量的情况 - 如果z是向量,则需要先计算z与x的内积,得到标量结果,然后再求导。 - $z = x^2$ - $l = z \cdot x$ - $\dfrac{\partial{l}}{\partial{x}} = \dfrac{\partial{l}}{\partial{z}} \dfrac{\partial{z}}{\partial{x}} = x \dfrac{\partial{z}}{\partial{x}} = x 2x$ ``` import torch x = torch.Tensor([2]) x.requires_grad=True y = x**2 # 求导函数 y.backward(x) # 2 x x = 8 print(x.grad, y.grad) print(x.grad/x) # 正宗结果 ``` ### 取求导向量为1向量 - 根据上面的推导,在自动求导中包含几个默认动作: - 1. 使用z.backward(),没有指定微分量的情况下,实际上是对图的所有标记为requires_grad=True的叶子张量实现求导; - 当叶子节点都是requires_grad=False,会抛出异常。 - `RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn` - 2. 使用z.backward(x),直接指定需要的求导; - 其实这种指定,是没有意义的,因为指定x,也是对所有requires_grad=True的叶子节点求导。 - 下面例子体会下,多个叶子节点的自动求导; - 就算只对x求导,实际对y也会求导; ``` import torch x = torch.Tensor([1, 2, 3]) y = torch.Tensor([4, 5, 6]) x.requires_grad=True y.requires_grad=True z = 3*x + 2*y # 求导函数 z.backward(x) # 对x求导 print(x.grad, y.grad) # [3., 6., 9.] :导数是3 与 [2., 4., 6.]:导数是2 print(x.grad/x, y.grad/x) # [3., 6., 9.] 
:导数是3 与 [2., 4., 6.]:导数是2 ``` - 从上面例子看出:backward的参数张量,仅仅是把求导函数从向量转换成标量求导, 本身并没有指定对哪个变量(张量求导的)的含义。 - 由于backward的参数仅仅是向量到变量的转化工作,所以我们去这个参数为1即可。下面是推理理论。 - $z = x^2$ - $l = z \cdot 1$ - $\dfrac{\partial{l}}{\partial{x}} = \dfrac{\partial{l}}{\partial{z}} \dfrac{\partial{z}}{\partial{x}} = \dfrac{\partial{z \cdot 1 }}{\partial{z}} \dfrac{\partial{z}}{\partial{x}} = \dfrac{\partial{z}}{\partial{x}} = 2x$ - 取1张量作为梯度求导 ``` import torch x = torch.Tensor([1, 2, 3]) x.requires_grad=True z = x**2 # 求导函数 z.backward(torch.ones_like(x)) print(x.grad, z.grad) ``` - 下面的操作与取1张量的原理完全一致 - 只是用户自己做了这个内积运算而已。 ``` import torch x = torch.Tensor([1, 2, 3]) x.requires_grad=True z = (x**2).sum() # 直接求和 z.backward() print(x.grad, z.grad) ``` ## 复杂的求导运算例子 - 下面是计算的图示意图: - ![image.png](attachment:image.png) ``` import torch # 叶子节点 x = torch.Tensor([1, 2, 3]) y = torch.Tensor([3, 4, 5]) z = torch.Tensor([1, 2, 3]) x.requires_grad=True y.requires_grad=True z.requires_grad=True # 中间节点 xy = x + y xy2 = xy ** 2 z3 = z ** 3 xy2z3=xy2 * z3 # 求导数 xy2z3.backward(torch.Tensor([1.0, 1.0, 1.0])) print(x.grad, y.grad, z.grad) print(xy.grad, xy2.grad, z3.grad, xy2z3.grad) # 没有梯度,因为不是叶子节点 print(xy.grad_fn, xy2.grad_fn, z3.grad_fn, xy2z3.grad_fn) print(xy.requires_grad, xy2.requires_grad, z3.requires_grad, xy2z3.requires_grad) ``` ## 中间导数 - 使用上面模式编程,可以发现其中只计算出输入变量的导数,中间变量的导数是无法获取的,如果想获取中间变量的导数,需要注册一个回调钩子函数,通过这个函数返回。 - 获取中间变量导数的例子 ``` import torch # 叶子节点 x = torch.Tensor([1, 2, 3]) y = torch.Tensor([3, 4, 5]) z = torch.Tensor([1, 2, 3]) x.requires_grad=True y.requires_grad=True z.requires_grad=True # 中间节点 xy = x + y # xyz = xy * z # xyz.backward(torch.Tensor([1, 1, 1])) xyz = torch.dot(xy, z) # ==================== def get_xy_grad(grad): print(F"xy的导数:{ grad }") # 可以保存到全局变量使用。 xy.register_hook(get_xy_grad) # ==================== xyz.backward() print(x.grad, y.grad, z.grad) print(xy.grad, y.grad, z.grad) ``` ## 高阶导数 1. 提供create_graph参数用来保留导数的图,用来实现高级导数的计算。 2. 
高阶导数因为不是叶子节点,需要通过回调钩子获取 ``` import torch x = torch.Tensor([1]) x.requires_grad=True z = x**6 # 求导函数 z.backward(create_graph=True) # retain_graph保留的是本身的运算图,create_graph是保留微分图 print(x.grad) # 导数3 # ==================== def get_xy_grad(grad): print(F"x.grad的高阶导数:{ grad }") # 可以保存到全局变量使用。 x.register_hook(get_xy_grad) # ==================== x.grad.backward(create_graph=True) ``` # Tensor的自动求导 - 有了上面的基础,下面看torch.autograd中的自动求导,就基本上非常简单。 - Torch提供了torch.autograd模块来实现自动求导,该模块暴露的调用如下: - `['Variable', 'Function', 'backward', 'grad_mode']` ## backward的使用 - autograd提供的backward是Tensor的backward的静态函数版本,使用谈不上便捷,但多了一个选择; ```python torch.autograd.backward( tensors, grad_tensors=None, retain_graph=None, create_graph=False, grad_variables=None) ``` - 参数说明: - tensors:被求导的向量(必须具有grad_fn); - grad_tensors=None:梯度向量; - retain_graph=None:保留计算图; - create_graph=False:创建个高阶微分图(可以自己手工得到高阶导数,也可以使用下面的grad封装函数); - grad_variables=None:兼容原来Variable版本的参数,在新的版本中不再使用。 - torch.autograd.backward函数的使用例子 - 参数grad_variables在我的这个版本中,已经不能使用。 ``` import torch # 叶子节点 x = torch.Tensor([1, 2, 3]) y = torch.Tensor([3, 4, 5]) z = torch.Tensor([1, 2, 3]) x.requires_grad=True y.requires_grad=True z.requires_grad=True # 中间节点 xy = x + y # xyz = xy * z # xyz.backward(torch.Tensor([1, 1, 1])) xyz = torch.dot(xy, z) # ==================== def get_xy_grad(grad): print(F"xy的导数:{ grad }") # 可以保存到全局变量使用。 xy.register_hook(get_xy_grad) # ==================== torch.autograd.backward(xyz) print(x.grad, y.grad, z.grad) print(xy.grad, y.grad, z.grad) ``` ## grad的使用 - 用来计算输出关于输入的梯度的和,不是返回所有的梯度,而是对某个输入变量的求导:$\dfrac{\partial{z}}{\partial{x}}$ - 这个函数的功能应该与hook功能类似。 - grad函数的定义: ```python torch.autograd.grad( outputs, inputs, grad_outputs=None, retain_graph=None, create_graph=False, only_inputs=True, allow_unused=False) ``` - 参数说明: - outputs:输出张量列表,与backward函数中的tensors作用一样; - inputs:输入张量列表,用来调用register_hook的张量; - grad_outputs:梯度张量列表,与backward函数中的grad_tensors作用一样; - retain_graph:逻辑值,用来指定运算完毕是否清除计算图; - create_graph:逻辑值,用来创建梯度的计算图(梯度的梯度就是高阶导数) - only_inputs:逻辑值,用来指定返回的计算结果,不仅仅是inputs指定的张量,而是计算所有叶子节点的导数。默认值True:这个参数已经不推荐使用,而且已经没有作用了,向计算叶子节点的导数没使用backward函数。 - allow_unused:逻辑值,用来检测是否每个输入都用来计算输出,False表示不需要,True表示如果有输入没有用于输出计算,则抛出错误。如果没有输入都是用,则True与False结果都一样。默认值False - grad的使用例子 ``` import torch # 叶子节点 x = torch.Tensor([1, 2, 3]) y = torch.Tensor([3, 4, 5]) z = torch.Tensor([1, 2, 3]) x.requires_grad=True y.requires_grad=True z.requires_grad=True # 中间节点 xy = x + y xyz = torch.dot(xy, z) # ==================== gd = torch.autograd.grad(xyz, x, retain_graph=True) print(x.grad, y.grad, z.grad) print(xy.grad, y.grad, z.grad) print(gd) print(torch.autograd.grad(xyz, xy,retain_graph=True)) print(torch.autograd.grad(xyz, y,retain_graph=True)) print(torch.autograd.grad(xyz, z,retain_graph=True, allow_unused=True)) # ==================== ``` ### grad的高阶求导 - 使用create_graph创建导数的图,并对导数再求导,从而实现高阶求导。 ``` import torch x = torch.Tensor([1]) x.requires_grad=True z = x**6 # 求导函数 gd_1 = torch.autograd.grad(z, x, create_graph=True) gd_2 = torch.autograd.grad(gd_1, x) print(F"一阶导数:{gd_1},\n二阶导数: {gd_2}") ``` # 求导的控制 ## set_grad_enabled类 - set_grad_enabled函数可以开启与关闭导数计算 - 一个上下文管理对象 - 函数声明如下: ```python torch.autograd.set_grad_enabled(mode) ``` - 参数: - mode:逻辑值,True开启,False关闭 ### 通常使用例子 ``` import torch x = torch.Tensor([1, 2, 3]) y = torch.Tensor([3, 4, 5]) z = torch.Tensor([1, 2, 3]) x.requires_grad=True y.requires_grad=True z.requires_grad=True torch.autograd.set_grad_enabled(False) # 全局上下文 xy = x + y xyz = torch.dot(xy, z) torch.autograd.set_grad_enabled(True) print(xy.requires_grad, 
xyz.requires_grad, z.requires_grad) ``` ### 上下文使用例子 ``` import torch x = torch.Tensor([1, 2, 3]) y = torch.Tensor([3, 4, 5]) z = torch.Tensor([1, 2, 3]) x.requires_grad=True y.requires_grad=True z.requires_grad=True with torch.autograd.set_grad_enabled(False) as grad_ctx: # 局部上下文 xy = x + y # 块结束,作用范围自动结束 xyz = torch.dot(xy, z) print(xy.requires_grad, xyz.requires_grad, z.requires_grad) ``` ## enable_grad类 - 这个类是一个装饰器类,提供更加简捷的开启方式。 - 也是一个上下文管理器; - 装饰器用于函数与类; ```python torch.autograd.enable_grad() ``` ### 装饰器使用例子 ``` import torch x = torch.Tensor([1, 2, 3]) y = torch.Tensor([3, 4, 5]) z = torch.Tensor([1, 2, 3]) x.requires_grad=True y.requires_grad=True z.requires_grad=True @ torch.autograd.enable_grad() def func_xy(x, y): return x + y # 块结束,作用范围自动结束 xy = func_xy(x, y) xyz = torch.dot(xy, z) print(xy.requires_grad, xyz.requires_grad, z.requires_grad) ``` ### 上下文使用例子 ``` import torch x = torch.Tensor([1, 2, 3]) y = torch.Tensor([3, 4, 5]) z = torch.Tensor([1, 2, 3]) x.requires_grad=True y.requires_grad=True z.requires_grad=True with torch.autograd.enable_grad(): xy = x + y xyz = torch.dot(xy, z) print(xy.requires_grad, xyz.requires_grad, z.requires_grad) ``` ## no_grad类 - 与enable_grad类一样的使用方式,作用却相反。 - 注意: - no_grad与enable_grad是函数装饰器,不是类装饰器; ### 装饰器使用方式 - 对整个函数作用,适合函数模式,如果函数中有特殊的情况,可以嵌套使用。 ``` import torch x = torch.Tensor([1, 2, 3]) y = torch.Tensor([3, 4, 5]) z = torch.Tensor([1, 2, 3]) x.requires_grad=True y.requires_grad=True z.requires_grad=True @ torch.autograd.no_grad() def func_xy(x, y): return x + y # 块结束,作用范围自动结束 xy = func_xy(x, y) xyz = torch.dot(xy, z) print(xy.requires_grad, xyz.requires_grad, z.requires_grad) ``` ### 上下文使用方式 - 适合于在非函数情况下使用 ``` import torch x = torch.Tensor([1, 2, 3]) y = torch.Tensor([3, 4, 5]) z = torch.Tensor([1, 2, 3]) x.requires_grad=True y.requires_grad=True z.requires_grad=True with torch.autograd.no_grad(): xy = x + y xyz = torch.dot(xy, z) print(xy.requires_grad, xyz.requires_grad, z.requires_grad) ``` ### no_grad与enable_grad混合使用 - 这种混合使用,可以满足开发的任何情况的需求; ``` import torch x = torch.Tensor([1, 2, 3]) y = torch.Tensor([3, 4, 5]) z = torch.Tensor([1, 2, 3]) x.requires_grad=True y.requires_grad=True z.requires_grad=True with torch.autograd.no_grad(): xy = x + y with torch.autograd.enable_grad(): z3 = z **3 xy2 = xy ** 2 # 因为xy的requires_grad=False,整个运算也是False print(xy.requires_grad, z3.requires_grad, xy2.requires_grad) ``` ----
github_jupyter
``` import numpy as np import os from PIL import Image from sklearn.preprocessing import LabelBinarizer import sys import glob import argparse import matplotlib.pyplot as plt import pickle as pkl from keras.applications.inception_v3 import InceptionV3, preprocess_input, decode_predictions from keras.models import Model, Sequential from keras.layers import Dropout, Flatten,Dense, GlobalAveragePooling2D, LeakyReLU from keras import optimizers from tensorflow.keras.optimizers import Adam,SGD from keras.preprocessing import image from keras.callbacks import ModelCheckpoint, EarlyStopping from sklearn.model_selection import train_test_split import tensorflow as tf from keras.utils.np_utils import to_categorical from keras import backend as K print('done') Data_X = np.load() # path for Major training image data Data_Y = np.load() # path for Major training class data Data_Y = np.array(Data_Y) print(np.unique(Data_Y)) Y_new = [] count_bcc = 0 count_bkl = 0 count_mel = 0 count_nv = 0 count_other = 0 for i in Data_Y: if i == 'bcc': Y_new.append(0) count_bcc = count_bcc + 1 elif i == 'bkl': Y_new.append(1) count_bkl = count_bkl + 1 elif i == 'mel': Y_new.append(2) count_mel = count_mel + 1 elif i == 'nv': Y_new.append(3) count_nv = count_nv + 1 elif i == 'other': Y_new.append(4) count_other = count_other + 1 print('bcc - ',count_bcc) print('bkl - ',count_bkl) print('mel - ', count_mel) print('nv - ',count_nv) print('other - ',count_other) print(Y_new) Y_new = np.array(Y_new) X_train, X_test, Y_train, Y_test = train_test_split(Data_X, Y_new, test_size=0.15, random_state=69,stratify= Y_new) print(X_train.shape) print(Y_train.shape) Y_train = to_categorical(Y_train, num_classes=5) Y_test = to_categorical(Y_test, num_classes=5) print('Train dataset shape',X_train.shape) print('Test dataset shape',X_test.shape) print(Y_train.shape) from tensorflow.keras.applications import ResNet50 base_model = ResNet50(input_shape=(76, 76,3), include_top=False, weights="imagenet") for layer in base_model.layers: layer.trainable = False from tensorflow.keras.applications import ResNet50 from tensorflow.python.keras.models import Sequential from tensorflow.python.keras.layers import Dense, Flatten, GlobalAveragePooling2D base_model.add(Dropout(0.1)) base_model = Sequential() base_model.add(ResNet50(include_top=False, weights='imagenet', pooling='max')) base_model.add(Dense(5, activation='softmax')) base_model.summary() import time from tensorflow.keras.preprocessing.image import ImageDataGenerator start = time.time() early_stopping_monitor = EarlyStopping(patience=100,monitor='val_accuracy') model_checkpoint_callback = ModelCheckpoint(filepath='resnet.h5', save_weights_only=False, monitor='val_accuracy', mode='auto', save_best_only=True, verbose=1) batch_size = 64 epochs = 250 optimizer = SGD(learning_rate=0.0001) base_model.compile(optimizer = optimizer, loss = 'categorical_crossentropy', metrics=['accuracy']) datagen = ImageDataGenerator(zoom_range = 0.2, shear_range=0.2) datagen.fit(X_train) history=base_model.fit(datagen.flow(X_train,Y_train), epochs=epochs, batch_size=batch_size, shuffle=True, callbacks=[early_stopping_monitor,model_checkpoint_callback], validation_data=(X_test, Y_test)) end = time.time() print("////////////////////////////Time Taken////////////////////////////////////////") print(end-start) ```
github_jupyter
##### Copyright 2020 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Taking advantage of context features <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/recommenders/examples/context_features"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/recommenders/blob/main/docs/examples/context_features.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/recommenders/blob/main/docs/examples/context_features.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/recommenders/docs/examples/context_features.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> In [the featurization tutorial](featurization) we incorporated multiple features beyond just user and movie identifiers into our models, but we haven't explored whether those features improve model accuracy. Many factors affect whether features beyond ids are useful in a recommender model: 1. __Importance of context__: if user preferences are relatively stable across contexts and time, context features may not provide much benefit. If, however, users preferences are highly contextual, adding context will improve the model significantly. For example, day of the week may be an important feature when deciding whether to recommend a short clip or a movie: users may only have time to watch short content during the week, but can relax and enjoy a full-length movie during the weekend. Similarly, query timestamps may play an important role in modelling popularity dynamics: one movie may be highly popular around the time of its release, but decay quickly afterwards. Conversely, other movies may be evergreens that are happily watched time and time again. 2. __Data sparsity__: using non-id features may be critical if data is sparse. With few observations available for a given user or item, the model may struggle with estimating a good per-user or per-item representation. To build an accurate model, other features such as item categories, descriptions, and images have to be used to help the model generalize beyond the training data. This is especially relevant in [cold-start](https://en.wikipedia.org/wiki/Cold_start_(recommender_systems)) situations, where relatively little data is available on some items or users. In this tutorial, we'll experiment with using features beyond movie titles and user ids to our MovieLens model. ## Preliminaries We first import the necessary packages. 
``` !pip install -q tensorflow-recommenders !pip install -q --upgrade tensorflow-datasets import os import tempfile import numpy as np import tensorflow as tf import tensorflow_datasets as tfds import tensorflow_recommenders as tfrs ``` We follow [the featurization tutorial](featurization) and keep the user id, timestamp, and movie title features. ``` ratings = tfds.load("movielens/100k-ratings", split="train") movies = tfds.load("movielens/100k-movies", split="train") ratings = ratings.map(lambda x: { "movie_title": x["movie_title"], "user_id": x["user_id"], "timestamp": x["timestamp"], }) movies = movies.map(lambda x: x["movie_title"]) ``` We also do some housekeeping to prepare feature vocabularies. ``` timestamps = np.concatenate(list(ratings.map(lambda x: x["timestamp"]).batch(100))) max_timestamp = timestamps.max() min_timestamp = timestamps.min() timestamp_buckets = np.linspace( min_timestamp, max_timestamp, num=1000, ) unique_movie_titles = np.unique(np.concatenate(list(movies.batch(1000)))) unique_user_ids = np.unique(np.concatenate(list(ratings.batch(1_000).map( lambda x: x["user_id"])))) ``` ## Model definition ### Query model We start with the user model defined in [the featurization tutorial](featurization) as the first layer of our model, tasked with converting raw input examples into feature embeddings. However, we change it slightly to allow us to turn timestamp features on or off. This will allow us to more easily demonstrate the effect that timestamp features have on the model. In the code below, the `use_timestamps` parameter gives us control over whether we use timestamp features. ``` class UserModel(tf.keras.Model): def __init__(self, use_timestamps): super().__init__() self._use_timestamps = use_timestamps self.user_embedding = tf.keras.Sequential([ tf.keras.layers.experimental.preprocessing.StringLookup( vocabulary=unique_user_ids, mask_token=None), tf.keras.layers.Embedding(len(unique_user_ids) + 1, 32), ]) if use_timestamps: self.timestamp_embedding = tf.keras.Sequential([ tf.keras.layers.experimental.preprocessing.Discretization(timestamp_buckets.tolist()), tf.keras.layers.Embedding(len(timestamp_buckets) + 1, 32), ]) self.normalized_timestamp = tf.keras.layers.experimental.preprocessing.Normalization() self.normalized_timestamp.adapt(timestamps) def call(self, inputs): if not self._use_timestamps: return self.user_embedding(inputs["user_id"]) return tf.concat([ self.user_embedding(inputs["user_id"]), self.timestamp_embedding(inputs["timestamp"]), self.normalized_timestamp(inputs["timestamp"]), ], axis=1) ``` Note that our use of timestamp features in this tutorial interacts with our choice of training-test split in an undesirable way. Because we have split our data randomly rather than chronologically (to ensure that events that belong to the test dataset happen later than those in the training set), our model can effectively learn from the future. This is unrealistic: after all, we cannot train a model today on data from tomorrow. This means that adding time features to the model lets it learn _future_ interaction patterns. We do this for illustration purposes only: the MovieLens dataset itself is very dense, and unlike many real-world datasets does not benefit greatly from features beyond user ids and movie titles. This caveat aside, real-world models may well benefit from other time-based features such as time of day or day of the week, especially if the data has strong seasonal patterns. 
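As a minimal sketch of the day-of-week idea mentioned above (this layer is not part of the tutorial; it assumes the raw timestamps are Unix seconds, and the embedding size is arbitrary):

```
class DayOfWeekEmbedding(tf.keras.layers.Layer):

  def __init__(self, embedding_dim=32):
    super().__init__()
    self.embedding = tf.keras.layers.Embedding(7, embedding_dim)

  def call(self, timestamps):
    # 1970-01-01 was a Thursday, so shifting by 3 maps Monday to index 0.
    days_since_epoch = tf.cast(timestamps, tf.int64) // 86400
    day_of_week = (days_since_epoch + 3) % 7
    return self.embedding(day_of_week)
```

Its output could be concatenated with the other embeddings in `UserModel.call`, in the same way the timestamp embedding is.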
### Candidate model For simplicity, we'll keep the candidate model fixed. Again, we copy it from the [featurization](featurization) tutorial: ``` class MovieModel(tf.keras.Model): def __init__(self): super().__init__() max_tokens = 10_000 self.title_embedding = tf.keras.Sequential([ tf.keras.layers.experimental.preprocessing.StringLookup( vocabulary=unique_movie_titles, mask_token=None), tf.keras.layers.Embedding(len(unique_movie_titles) + 1, 32) ]) self.title_vectorizer = tf.keras.layers.experimental.preprocessing.TextVectorization( max_tokens=max_tokens) self.title_text_embedding = tf.keras.Sequential([ self.title_vectorizer, tf.keras.layers.Embedding(max_tokens, 32, mask_zero=True), tf.keras.layers.GlobalAveragePooling1D(), ]) self.title_vectorizer.adapt(movies) def call(self, titles): return tf.concat([ self.title_embedding(titles), self.title_text_embedding(titles), ], axis=1) ``` ### Combined model With both `UserModel` and `MovieModel` defined, we can put together a combined model and implement our loss and metrics logic. Note that we also need to make sure that the query model and candidate model output embeddings of compatible size. Because we'll be varying their sizes by adding more features, the easiest way to accomplish this is to use a dense projection layer after each model: ``` class MovielensModel(tfrs.models.Model): def __init__(self, use_timestamps): super().__init__() self.query_model = tf.keras.Sequential([ UserModel(use_timestamps), tf.keras.layers.Dense(32) ]) self.candidate_model = tf.keras.Sequential([ MovieModel(), tf.keras.layers.Dense(32) ]) self.task = tfrs.tasks.Retrieval( metrics=tfrs.metrics.FactorizedTopK( candidates=movies.batch(128).map(self.candidate_model), ), ) def compute_loss(self, features, training=False): # We only pass the user id and timestamp features into the query model. This # is to ensure that the training inputs would have the same keys as the # query inputs. Otherwise the discrepancy in input structure would cause an # error when loading the query model after saving it. query_embeddings = self.query_model({ "user_id": features["user_id"], "timestamp": features["timestamp"], }) movie_embeddings = self.candidate_model(features["movie_title"]) return self.task(query_embeddings, movie_embeddings) ``` ## Experiments ### Prepare the data We first split the data into a training set and a testing set. ``` tf.random.set_seed(42) shuffled = ratings.shuffle(100_000, seed=42, reshuffle_each_iteration=False) train = shuffled.take(80_000) test = shuffled.skip(80_000).take(20_000) cached_train = train.shuffle(100_000).batch(2048) cached_test = test.batch(4096).cache() ``` ### Baseline: no timestamp features We're ready to try out our first model: let's start with not using timestamp features to establish our baseline. ``` model = MovielensModel(use_timestamps=False) model.compile(optimizer=tf.keras.optimizers.Adagrad(0.1)) model.fit(cached_train, epochs=3) train_accuracy = model.evaluate( cached_train, return_dict=True)["factorized_top_k/top_100_categorical_accuracy"] test_accuracy = model.evaluate( cached_test, return_dict=True)["factorized_top_k/top_100_categorical_accuracy"] print(f"Top-100 accuracy (train): {train_accuracy:.2f}.") print(f"Top-100 accuracy (test): {test_accuracy:.2f}.") ``` This gives us a baseline top-100 accuracy of around 0.2. ### Capturing time dynamics with time features Do the result change if we add time features? 
``` model = MovielensModel(use_timestamps=True) model.compile(optimizer=tf.keras.optimizers.Adagrad(0.1)) model.fit(cached_train, epochs=3) train_accuracy = model.evaluate( cached_train, return_dict=True)["factorized_top_k/top_100_categorical_accuracy"] test_accuracy = model.evaluate( cached_test, return_dict=True)["factorized_top_k/top_100_categorical_accuracy"] print(f"Top-100 accuracy (train): {train_accuracy:.2f}.") print(f"Top-100 accuracy (test): {test_accuracy:.2f}.") ``` This is quite a bit better: not only is the training accuracy much higher, but the test accuracy is also substantially improved. ## Next Steps This tutorial shows that even simple models can become more accurate when incorporating more features. However, to get the most of your features it's often necessary to build larger, deeper models. Have a look at the [deep retrieval tutorial](deep_recommenders) to explore this in more detail.
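For reference, a "larger, deeper model" in this setting typically means extra Dense layers stacked on top of the feature embeddings. A minimal sketch (the layer sizes are arbitrary, not taken from the deep retrieval tutorial):

```
deeper_query_model = tf.keras.Sequential([
    UserModel(use_timestamps=True),
    tf.keras.layers.Dense(64, activation="relu"),
    tf.keras.layers.Dense(32),
])
```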
github_jupyter
<h2> Basics of Python: Lists </h2> We review using Lists in Python here. Please run each cell and check the results. A list (or array) is a collection of objects (variables) separated by comma. The order is important, and we can access each element in the list with its index starting from 0. ``` # here is a list holding all even numbers between 10 and 20 L = [10, 12, 14, 16, 18, 20] # let's print the list print(L) # let's print each element by using its index but in reverse order print(L[5],L[4],L[3],L[2],L[1],L[0]) # let's print the length (size) of list print(len(L)) # let's print each element and its index in the list # we use a for-loop, and the number of iteration is determined by the length of the list # everthing is automatical :-) L = [10, 12, 14, 16, 18, 20] for i in range(len(L)): print(L[i],"is the element in our list with the index",i) # let's replace each number in the above list with its double value # L = [10, 12, 14, 16, 18, 20] # let's print the list before doubling operation print("the list before doubling operation is",L) for i in range(len(L)): current_element=L[i] # get the value of the i-th element L[i] = 2 * current_element # update the value of the i-th element # let's shorten the code as #L[i] = 2 * L[i] # or #L[i] *= 2 # let's print the list after doubling operation print("the list after doubling operation is",L) # after each execution of this cell, the latest values will be doubled # so the values in the list will be exponentially increased # let's define two lists L1 = [1,2,3,4] L2 = [-5,-6,-7,-8] # two lists can be concatenated # the result is a new list print("the concatenation of L1 and L2 is",L1+L2) # the order of terms is important print("the concatenation of L2 and L1 is",L2+L1) # this is a different list than L1+L2 # we can add a new element to a list, which increases its length/size by 1 L = [10, 12, 14, 16, 18, 20] print(L,"the current length is",len(L)) # we add two values by showing two different methods # L.append(value) directly adds the value as a new element to the list L.append(-4) # we can also use concatenation operator + L = L + [-8] # here [-8] is a list having a single element print(L,"the new length is",len(L)) # a list can be multiplied with an integer L = [1,2] # we can consider the multiplication of L by an integer as a repeated summation (concatenation) of L by itself # L * 1 is the list itself # L * 2 is L + L (the concatenation of L with itself) # L * 3 is L + L + L (the concatenation of L with itself twice) # L * m is L + ... + L (the concatenation of L with itself m-1 times) # L * 0 is the empty list # L * i is the same as i * L # let's print the different cases for i in range(6): print(i,"* L is",i*L) # this operation can be useful when initializing a list with the same value(s) # let's create a list of prime numbers less than 100 # here is a function that determines whether a given number is prime or not def prime(number): if number < 2: return False if number == 2: return True if number % 2 == 0: return False for i in range(3,number,2): if number % i == 0: return False return True # end of a function # let's start with an empty list L=[] # what can the length of this list be? print("my initial length is",len(L)) for i in range(2,100): if prime(i): L.append(i) # alternative methods: #L = L + [i] #L += [i] # print the final list print(L) print("my final length is",len(L)) ``` For a given integer $n \geq 0$, $ S(0) = 0 $, $ S(1)=1 $, and $ S(n) = 1 + 2 + \cdots + n $. 
We define list $ L(n) $ such that the element with index $n$ holds $ S(n) $. In other words, the elements of $ L(n) $ are $ [ S(0)~~S(1)~~S(2)~~\cdots~~S(n) ] $. Let's build the list $ L(20) $. ``` # let's define the list with S(0) L = [0] # let's iteratively define n and S # initial values n = 0 S = 0 # the number of iterations N = 20 while n <= N: # we iterate all values from 1 to 20 n = n + 1 S = S + n L.append(S) # print the final list print(L) ``` <h3> Task 1 </h3> Fibonacci sequence starts with $ 1 $ and $ 1 $. Then, each next element is equal to the summation of the previous two elements: $$ 1, 1, 2 , 3 , 5, 8, 13, 21, 34, 55, \ldots $$ Find the first 30 elements of the Fibonacci sequence, store them in a list, and then print the list. You can verify the first 10 elements of your result with the above list. ``` # # your solution is here # the first and second elements are 1 and 1 F = [1,1] for i in range(2,30): F.append(F[i-1] + F[i-2]) # print the final list print(F) ``` <h3> Lists of different objects </h3> A list can have any type of values. ``` # the following list stores certain information about Asja # name, surname, age, profession, height, weight, partner(s) if any, kid(s) if any, the creation date of list ASJA = ['Asja','Sarkane',34,'musician',180,65.5,[],['Eleni','Fyodor'],"October 24, 2018"] print(ASJA) # Remark that an element of a list can be another list as well. ``` <h3> Task 2 </h3> Define a list $ N $ with 11 elements such that $ N[i] $ is another list with four elements such that $ [i, i^2, i^3, i^2+i^3] $. The index $ i $ should be between $ 0 $ and $ 10 $. ``` # # your solution is here # # define an empty list N = [] for i in range(11): N.append([ i , i*i , i*i*i , i*i + i*i*i ]) # a list having four elements is added to the list N # Alternatively: #N.append([i , i**2 , i**3 , i**2 + i**3]) # ** is the exponent operator #N = N + [ [i , i*i , i*i*i , i*i + i*i*i] ] # Why using double brackets? #N = N + [ [i , i**2 , i**3 , i**2 + i**3] ] # Why using double brackets? # In the last two alternative solutions, you may try with a single bracket, # and then see why double brackets are needed for the exact answer. # print the final list print(N) # let's print the list N element by element for i in range(len(N)): print(N[i]) # let's print the list N element by element by using an alternative method for el in N: # el will iteratively takes the values of elements in N print(el) ``` <h3> Dictionaries </h3> The outcomes of a quantum program (circuit) will be stored in a dictionary. Therefore, we very shortly mention about the dictionary data type. A dictionary is a set of paired elements. Each pair is composed by a key and its value, and any value can be accessed by its key. ``` # let's define a dictionary pairing a person with her/his age ages = { 'Asja':32, 'Balvis':28, 'Fyodor':43 } # let print all keys for person in ages: print(person) # let's print the values for person in ages: print(ages[person]) ```
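One more small example with the same dictionary: a value can be read or changed through its key, and a new pair is added the same way. (The key 'Danijela' below is just an illustrative addition.)

```
# access a single value by its key
print(ages['Asja'])

# update an existing value and add a new pair
ages['Asja'] = 33
ages['Danijela'] = 25  # a made-up entry for illustration
print(ages)
```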
github_jupyter
# lab2 Logistic Regression
```
%matplotlib inline
import numpy as np
import matplotlib
import pandas as pd
import matplotlib.pyplot as plt
import scipy.optimize as op
```
## 1. Load Data
```
data = pd.read_csv('ex2data1.txt')
X = np.array(data.iloc[:,0:2])
y = np.array(data.iloc[:,2])
print('X.shape = ' + str(X.shape))
print('y.shape = ' + str(y.shape))

def plotData(X, y):
    k1 = (y==1)
    k2 = (y==0)
    plt.scatter(X[k1,0], X[k1,1], c='r',marker='+')
    plt.scatter(X[k2,0], X[k2,1], c='b',marker='o')
    plt.xlabel('Exam 1 score')
    plt.ylabel('Exam 2 score')
    plt.legend(['Admitted', 'Not admitted'])

plotData(X, y)
plt.show()

# Add a column of ones to the left of X (the intercept term)
m = X.shape[0]
n = X.shape[1]
X = np.hstack((np.ones((m,1)), X))
print('X.shape = ' + str(X.shape))
ini_theta = np.zeros((n+1, 1))
```
## 2. Cost and Gradient
$$ g(z)=\frac{1}{1+e^{-z}} $$
$$ J(\theta)=\frac{1}{m}\sum_{i=1}^{m}[-y^{(i)}\log(h_\theta(x^{(i)}))-(1-y^{(i)})\log(1-h_\theta(x^{(i)}))] $$
$$ \frac{\partial J(\theta)}{\partial\theta_j}=\frac{1}{m}\sum_{i=1}^{m} [(h_\theta(x^{(i)})-y^{(i)})x^{(i)}_j] $$
```
def sigmoid(z):
    return 1 / (1+np.exp(-z))

def gradient(theta, X, y):
    '''compute gradient
    args:
        X - X.shape = (m,n)
        theta - theta.shape = (n,1)
        y - y.shape = (m,1)
    return:
        grade - the gradient
    '''
    m = X.shape[0]
    n = X.shape[1]
    theta = theta.reshape((n,1))
    y = y.reshape((m,1))
    h = sigmoid(np.dot(X, theta))
    tmp = np.sum((h-y)*X, axis=0) / m
    grade = tmp.reshape(theta.shape)
    return grade

def costFunction(theta, X, y):
    '''compute cost
    args:
        X - X.shape = (m,n)
        theta - theta.shape = (n,1)
        y - y.shape = (m,1)
    return:
        J - the cost
    '''
    m = X.shape[0]
    n = X.shape[1]
    theta = theta.reshape((n,1))
    y = y.reshape((m,1))
    h = sigmoid(np.dot(X, theta))
    term1 = y * np.log(h)
    term2 = (1-y) * np.log(1-h)
    J = sum(- term1 - term2) / m
    return J

grade = gradient(ini_theta, X, y)
cost = costFunction(ini_theta, X, y)
print('cost = ' + str(cost))
grade

test_theta = [[-24], [0.2], [0.2]]
test_theta = np.array(test_theta)
grade = gradient(test_theta, X, y)
cost = costFunction(test_theta, X, y)
print('cost = ' + str(cost))
grade
```
## 3. Predict
Here we use an off-the-shelf optimizer from scipy instead of writing gradient descent ourselves.
```
result = op.minimize(fun=costFunction, x0=ini_theta, args=(X, y), method='TNC', jac=gradient)
optimal_theta = result.x
optimal_theta

def plotDecisionBoundary(theta, X, y):
    '''Plot the decision boundary line
    '''
    plotData(X[:,1:3], y)
    plot_x = np.array([np.min(X[:,1])-2, np.max(X[:,1])+2])
    # theta0 + theta1 * x1 + theta2 * x2 == 0
    # substitute into the sigmoid function
    # g(z) = 1/2 is the threshold that separates class 1 from class 0
    plot_y = -1 / theta[2] * (theta[1]*plot_x + theta[0])
    plt.plot(plot_x, plot_y)

plotDecisionBoundary(optimal_theta, X, y)
plt.show()

def predict(theta, X):
    m = X.shape[0]
    pred = np.zeros((m,1))
    h = sigmoid(np.dot(X, theta))
    pred[h>=0.5] = 1
    return pred.flatten()

prob = np.array([1, 45, 85])
prob = sigmoid(np.dot(prob, optimal_theta))
prob

# Compute the training accuracy; note the neat use of np.mean on the boolean array p==y
p = predict(optimal_theta, X)
print('Train accuracy = {}%'.format(100 * np.mean(p==y)) )
```
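As an optional sanity check (not part of the original lab), we can compare the analytic gradient above with a finite-difference approximation of the cost; the two should agree to several decimal places. The cell below only reuses `costFunction` and `gradient` as defined above.

```
# Optional sanity check: compare the analytic gradient with a numerical approximation
eps = 1e-4
theta_check = np.array([[-24.0], [0.2], [0.2]])

numeric_grad = np.zeros_like(theta_check)
for j in range(theta_check.size):
    theta_plus = theta_check.copy()
    theta_minus = theta_check.copy()
    theta_plus[j] += eps
    theta_minus[j] -= eps
    # central difference approximation of dJ/dtheta_j
    numeric_grad[j] = (costFunction(theta_plus, X, y) - costFunction(theta_minus, X, y)) / (2 * eps)

analytic_grad = gradient(theta_check, X, y)
print('numeric grad  = ' + str(numeric_grad.ravel()))
print('analytic grad = ' + str(analytic_grad.ravel()))
print('max abs difference = ' + str(np.max(np.abs(numeric_grad - analytic_grad))))
```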
github_jupyter
# Inference This notebook is dedicated to testing and visualizing results for both the wiki and podcast datasets Note: Apologies for the gratuitous warnings. Tensorflow is aware of these issues and has rectified them in later versions of TensorFlow. Unfortunately, they persist for version 1.13. ``` from src.SliceNet import SliceNet from src.netUtils import getSingleExample import matplotlib.pyplot as plt from pathlib import Path import numpy as np import pandas as pd import seaborn as sns import random import math import warnings warnings.filterwarnings('ignore') import tensorflow as tf tf.logging.set_verbosity(tf.logging.ERROR) import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' if type(tf.contrib) != type(tf): tf.contrib._warning = None %load_ext autoreload %autoreload 2 # Choose whether to use the base network or the network with self-attention attention = True # Current best networks best_base_wiki = '/home/bmmidei/SliceCast/models/04_20_2019_2300_final.h5' best_base_podcast = '/home/bmmidei/SliceCast/models/04_26_2019_1000_podcast.h5' best_attn_wiki = '/home/bmmidei/SliceCast/models/05_03_2019_0800_attn.h5' best_attn_podcast = '/home/bmmidei/SliceCast/models/05_02_2019_2200_attn_podcast.h5' if attention: weights_wiki = best_attn_wiki weights_podcast = best_attn_podcast else: weights_wiki = best_base_wiki weights_podcast = best_base_podcast net = SliceNet(classification=True, class_weights=[1.0,10,0.2], attention=attention) ``` ## Sample predictions on unseen wiki articles Note that this section relies on wikipedia ``` dataPath = Path('/home/bmmidei/SliceCast/data/wiki-sample/') files = [str(x) for x in dataPath.glob('**/*') if x.suffix=='.hdf5'] mask = random.sample(range(0,len(files)), 1) # randomly select a file to test test_file = [x for (i,x) in enumerate(files) if i in mask][0] k = 4 num_samples = 16 preds, labels, pk = net.predict(test_file=test_file, num_samples=num_samples, weights_path=weights_wiki, k=k) print('Average PK score with k={} on {} examples is: {:0.3f}'.format(k, num_samples, pk)) np.set_printoptions(suppress=True) preds = np.argmax(preds, axis=2) labels = np.argmax(labels, axis=2) # Choose the index of the document you want to examine idx = 2 # You can keep running this cell with different indices to visualize different # documents within this batch of testing # Note: The graph displays n sentences where n is the length of the longest # document in the batch. 
As such, there may be padding sections at the beginning # of the document with label and prediction of value 2 df = pd.DataFrame() df['preds'] = preds[idx,:] df['labels'] = labels[idx,:] df['sent_number'] = df.index fig, axes = plt.subplots(nrows=2, ncols=1) df.plot(x='sent_number', y='preds', figsize=(10,5), grid=True, ax=axes[0]) df.plot(x='sent_number', y='labels', figsize=(10,5), grid=True, ax=axes[1], color='green') ``` ## Sample predictions on unseen podcast data ``` test_file = '/home/bmmidei/SliceCast/data/podcasts/hdf5/batch0_0.hdf5' k = 33 num_samples = 2 preds, labels, pk = net.predict(test_file=test_file, num_samples=num_samples, weights_path=weights_podcast, k=k) print('Average PK score with k={} on {} examples is: {:0.3f}'.format(k, num_samples, pk)) np.set_printoptions(suppress=True) preds = np.argmax(preds, axis=2) labels = np.argmax(labels, axis=2) # Choose the document you want to examine idx = 1 df = pd.DataFrame() df['preds'] = preds[idx,:] df['labels'] = labels[idx,:] df['sent_number'] = df.index fig, axes = plt.subplots(nrows=2, ncols=1) df.plot(x='sent_number', y='preds', figsize=(10,5), grid=True, ax=axes[0]) df.plot(x='sent_number', y='labels', figsize=(10,5), grid=True, ax=axes[1], color='green') ``` ## Predictions on a single text file ``` text_file = '/home/bmmidei/SliceCast/data/podcasts/with_timestamps/joe1254.txt' is_labeled = True weights_path = weights_podcast # transfer learning sents, labels = getSingleExample(fname=text_file, is_labeled=is_labeled) sents = np.expand_dims(sents, axis=0) preds = net.singlePredict(sents, weights_path=weights_path) # Place data into a pandas dataframe for analysis df = pd.DataFrame() preds = np.argmax(np.squeeze(preds), axis=-1) df['raw_sentences'] = sents[0] if is_labeled: df['labels'] = labels df['preds'] = preds df['sent_number'] = df.index fig, axes = plt.subplots(nrows=2, ncols=1) df.plot(x='sent_number', y='preds', figsize=(10,5), grid=True, ax=axes[0]) df.plot(x='sent_number', y='labels', figsize=(10,5), grid=True, ax=axes[1], color='green') ``` ## Keyword Extraction The following cells are experimental code to extract keywords for each segment in order to provide context for each segment. ``` from src.postprocess import getSummaries, getTimeStamps import nltk nltk.download('stopwords') keywords = getSummaries(sents[0], preds) stamps = getTimeStamps(sents[0], '/home/bmmidei/SliceCast/data/podcasts/with_timestamps/joe1254.json', preds) seconds = [x%60 for x in stamps] minutes = [math.floor(x/60) for x in stamps] for i, (x, y)in enumerate(zip(minutes, seconds)): print("{}:{}".format(x, y), end="") print([x[0] for x in keywords[i]]) ```
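The cell below is a rough additional sketch (not part of the original pipeline) that groups the sentences of the single-document example into predicted segments. It assumes that class 1 marks the first sentence of a new segment, class 0 a continuation, and class 2 padding, which matches the convention suggested by the plots above.

```
# Rough sketch: group sentences into segments using the predicted labels.
# Assumption (for illustration only): label 1 starts a new segment,
# label 0 continues the current segment, label 2 is padding.
segments = []
current = []
for sent, label in zip(sents[0], preds):
    if label == 2:               # skip padded positions
        continue
    if label == 1 and current:   # a new segment starts here
        segments.append(current)
        current = []
    current.append(sent)
if current:
    segments.append(current)

print('Number of predicted segments: {}'.format(len(segments)))
for i, seg in enumerate(segments[:5]):
    print('Segment {}: {} sentences, starts with: {}'.format(i, len(seg), str(seg[0])[:80]))
```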
github_jupyter
# "The Role of Wide Baseline Stereo in the Deep Learning World" > "Short history of wide baseline stereo in computer vision" - toc: false - image: images/doll_wbs_300.png - branch: master - badges: true - comments: true - hide: false - search_exclude: false ## Rise of Wide Multiple Baseline Stereo The *wide multiple baseline stereo (WxBS)* is a process of establishing a sufficient number of pixel or region correspondences from two or more images depicting the same scene to estimate the geometric relationship between cameras, which produced these images. Typically, WxBS relies on the scene rigidity -- the assumption that there is no motion in the scene except the motion of the camera itself. The stereo problem is called wide multiple baseline if the images are significantly different in more than one aspect: viewpoint, illumination, time of acquisition, and so on. Historically, people were focused on the simpler problem with a single baseline, which was geometrical, i.e., viewpoint difference between cameras, and the area was known as wide baseline stereo. Nowadays, the field is mature and research is focused on solving more challenging multi-baseline problems. WxBS is a building block of many popular computer vision applications, where spatial localization or 3D world understanding is required -- panorama stitching, 3D reconstruction, image retrieval, SLAM, etc. If the wide baseline stereo is a new concept for you, I recommend checking the [examplanation in simple terms](https://ducha-aiki.github.io/wide-baseline-stereo-blog/2021/01/09/wxbs-in-simple-terms.html). ![](00_intro_files/match_doll.png "Correspondences between two views found by wide baseline stereo algorithm. Photo and doll created by Olha Mishkina") **Where does wide baseline stereo come from?** As often happens, a new problem arises from the old -- narrow or short baseline stereo. In the narrow baseline stereo, images are taken from nearby positions, often exactly at the same time. One could find correspondence for the point $(x,y)$ from the image $I_1$ in the image $I_2$ by simply searching in some small window around $(x,y)$\cite{Hannah1974ComputerMO, Moravec1980} or, assuming that camera pair is calibrated and the images are rectified -- by searching along the epipolar line\cite{Hartley2004}. ![](2020-03-27-intro_files/att_00003.png "Correspondence search in narrow baseline stereo, from Moravec 1980 PhD thesis.") <!--- ![Wide baseline stereo model. "Baseline" is the distance between cameras. Image by Arne Nordmann (WikiMedia)](00_intro_files/Epipolar_geometry.svg) --> One of the first, if not the first, approaches to the wide baseline stereo problem was proposed by Schmid and Mohr \cite{Schmid1995} in 1995. Given the difficulty of the wide multiple baseline stereo task at the moment, only a single --- geometrical -- baseline was considered, thus the name -- wide baseline stereo (WBS). The idea of Schmid and Mohr was to equip each keypoint with an invariant descriptor. This allowed establishing tentative correspondences between keypoints under viewpoint and illumination changes, as well as occlusions. One of the stepping stones was the corner detector by Harris and Stevens \cite{Harris88}, initially used for the application of tracking. It is worth a mention, that there were other good choices for the local feature detector at the time, starting with the Forstner \cite{forstner1987fast}, Moravec \cite{Moravec1980} and Beaudet feature detectors \cite{Hessian78}. 
The Schmid and Mohr approach was later extended by Beardsley, Torr and Zisserman \cite{Beardsley96} by adding RANSAC \cite{RANSAC1981} robust geometry estimation and later refined by Pritchett and Zisserman \cite{Pritchett1998, Pritchett1998b} in 1998. The general pipeline remains mostly the same until now \cite{WBSTorr99, CsurkaReview2018, IMW2020}, which is shown in Figure below. <!--- ![image.png](00_intro_files/att_00002.png) --> ![](00_intro_files/matching-filtering.png "Commonly used wide baseline stereo pipeline") Let's write down the WxBS algorithm: 1. Compute interest points/regions in all images independently 2. For each interest point/region compute a descriptor of their neigborhood (local patch). 3. Establish tentative correspondences between interest points based on their descriptors. 4. Robustly estimate geometric relation between two images based on tentative correspondences with RANSAC. The reasoning behind each step is described in [this separate post](https://ducha-aiki.github.io/wide-baseline-stereo-blog/2021/02/11/WxBS-step-by-step.html). ## Quick expansion This algorithm significantly changed computer vision landscape for next forteen years. Soon after the introduction of the WBS algorithm, it became clear that its quality significantly depends on the quality of each component, i.e., local feature detector, descriptor, and geometry estimation. Local feature detectors were designed to be as invariant as possible, backed up by the scale-space theory, most notable developed by Lindenberg \cite{Lindeberg1993, Lindeberg1998, lindeberg2013scale}. A plethora of new detectors and descriptors were proposed in that time. We refer the interested reader to these two surveys: by Tuytelaars and Mikolajczyk \cite{Tuytelaars2008} (2008) and by Csurka \etal \cite{CsurkaReview2018} (2018). Among the proposed local features is one of the most cited computer vision papers ever -- SIFT local feature \cite{Lowe99, SIFT2004}. Besides the SIFT descriptor itself, Lowe's paper incorporated several important steps, proposed earlier with his co-authors, to the matching pipeline. Specifically, they are quadratic fitting of the feature responses for precise keypoint localization \cite{QuadInterp2002}, using the Best-Bin-First kd-tree \cite{aknn1997} as an approximate nearest neightbor search engine to speed-up the tentative correspondences generation, and using second-nearest neighbor (SNN) ratio to filter the tentative matches. It is worth noting that SIFT feature became popular only after Mikolajczyk benchmark paper \cite{MikoDescEval2003, Mikolajczyk05} that showed its superiority to the rest of alternatives. Robust geometry estimation was also a hot topic: a lot of improvements over vanilla RANSAC were proposed. For example, LO-RANSAC \cite{LOransac2003} proposed an additional local optimization step into RANSAC to significantly decrease the number of required steps. PROSAC \cite{PROSAC2005} takes into account the tentative correspondences matching score during sampling to speed up the procedure. DEGENSAC \cite{Degensac2005} improved the quality of the geometry estimation in the presence of a dominant plane in the images, which is the typical case for urban images. We refer the interested reader to the survey by Choi \etal \cite{RANSACSurvey2009}. Success of wide baseline stereo with SIFT features led to aplication of its components to other computer vision tasks, which were reformulated through wide baseline stereo lens: - **Scalable image search**. 
Sivic and Zisserman in the famous "Video Google" paper\cite{VideoGoogle2003} proposed to treat local features as "visual words" and to use ideas from text processing for searching in image collections. Later even more WBS elements were re-introduced to image search, most notably -- **spatial verification**\cite{Philbin07}: a simplified RANSAC procedure to verify that visual-word matches are spatially consistent.

![](00_intro_files/att_00004.png "Bag of words image search. Image credit: Filip Radenovic http://cmp.felk.cvut.cz/ radenfil/publications/Radenovic-CMPcolloq-2015.11.12.pdf")

- **Image classification** was performed by placing some classifier (SVM, random forest, etc.) on top of some encoding of the SIFT-like descriptors, extracted sparsely\cite{Fergus03, CsurkaBoK2004} or densely\cite{Lazebnik06}.

![](00_intro_files/att_00005.png "Bag of local features representation for classification from Fergus03")

- **Object detection** was formulated as a relaxed wide baseline stereo problem\cite{Chum2007Exemplar} or as classification of SIFT-like features inside a sliding window \cite{HoG2005}

![](00_intro_files/att_00003.png "Exemplar-representation of the classes using local features, cite{Chum2007Exemplar}")

<!--- ![HoG-based pedestrian detection algorithm](00_intro_files/att_00006.png) ![Histogram of gradient visualization](00_intro_files/att_00007.png) -->

- **Semantic segmentation** was performed by classification of local region descriptors, typically SIFT and color features, with postprocessing afterwards\cite{Superparsing2010}.

Of course, wide baseline stereo was also used for its direct applications:

- **3D reconstruction** was based on camera poses and 3D points estimated with the help of SIFT features \cite{PhotoTourism2006, RomeInDay2009, COLMAP2016}

![](00_intro_files/att_00008.png "SfM pipeline from COLMAP")

- **SLAM (Simultaneous localization and mapping)** \cite{Se02, PTAM2007, Mur15} was based on fast versions of local feature detectors and descriptors.

<!--- ![ORBSLAM pipeline](00_intro_files/att_00009.png) -->

- **Panorama stitching** \cite{Brown07} and, more generally, **feature-based image registration**\cite{DualBootstrap2003} were initialized with a geometry obtained by WBS and then further optimized.

## Deep Learning Invasion: retreat to the geometrical fortress

In 2012 the deep learning-based AlexNet \cite{AlexNet2012} approach beat all other methods in image classification at the ImageNet Large Scale Visual Recognition Challenge (ILSVRC). Soon after, Razavian et al.\cite{Astounding2014} showed that convolutional neural networks (CNNs) pre-trained on ImageNet outperform more complex traditional solutions in image and scene classification, object detection and image search, see Figure below. The performance gap between deep learning and "classical" solutions was large and quickly increasing. In addition, deep learning pipelines, be they off-the-shelf pre-trained, fine-tuned or end-to-end learned networks, are simple from the engineering perspective. That is why deep learning algorithms quickly became the default option for many computer vision problems.

![](00_intro_files/att_00010.png "CNN representation beats complex traditional pipelines. Reds are CNN-based and greens are the handcrafted. From Astounding2014")

However, there was still a domain where deep-learned solutions failed, sometimes spectacularly: geometry-related tasks.
Wide baseline stereo \cite{Melekhov2017relativePoseCnn}, visual localization \cite{PoseNet2015} and SLAM are still areas, where the classical wide baseline stereo dominates \cite{sattler2019understanding, zhou2019learn, pion2020benchmarking}. The full reasons why convolution neural network pipelines are struggling to perform tasks that are related to geometry, and how to fix that, are yet to be understood. The observations from the recent papers are following: - CNN-based pose predictions predictions are roughly equivalent to the retrieval of the most similar image from the training set and outputing its pose \cite{sattler2019understanding}. This kind of behaviour is also observed in a related area: single-view 3D reconstruction performed by deep networks is essentially a retrieval of the most similar 3D model from the training set \cite{Tatarchenko2019}. - Geometric and arithmetic operations are hard to represent via vanilla neural networks (i.e., matrix multiplication followed by non-linearity) and they may require specialized building blocks, approximating operations of algorithmic or geometric methods, e.g. spatial transformers \cite{STN2015} and arithmetic units \cite{NALU2018,NAU2020}. Even with such special-purpose components, the deep learning solutions require "careful initialization, restricting parameter space, and regularizing for sparsity" \cite{NAU2020}. - Vanilla CNNs suffer from sensitivity to geometric transformations like scaling and rotation \cite{GroupEqCNN2016} or even translation \cite{MakeCNNShiftInvariant2019}. The sensitivity to translations might sound counter-intuitive, because the concolution operation by definition is translation-covariant. However, a typical CNN contains also zero-padding and downscaling operations, which break the covariance \cite{MakeCNNShiftInvariant2019, AbsPositionCNN2020}. Unlike them, classical local feature detectors are grounded on scale-space \cite{lindeberg2013scale} and image processing theories. Some of the classical methods deal with the issue by explicit geometric normalization of the patches before description. - CNNs predictions can be altered by a change in a small localized area \cite{AdvPatch2017} or even a single pixel \cite{OnePixelAttack2019}, while the wide baseline stereo methods require the consensus of different independent regions. ## Today: assimilation and merging ### Wide baseline stereo as a task: formulate differentiably and learn modules This leads us to the following question -- **is deep learning helping WxBS today?** The answer is yes. After the quick interest in the black-box-style models, the current trend is to design deep learning solutions for the wide baseline stereo in a modular fashion \cite{cv4action2019}, resembling the one in Figure below. Such modules are learned separately. For example, the HardNet \cite{HardNet2017} descriptor replaces SIFT local descriptor. The Hessian detector can be replaced by deep learned detectors like KeyNet \cite{KeyNet2019} or the joint detector-descriptor \cite{SuperPoint2017, R2D22019, D2Net2019}. The matching and filtering are performed by the SuperGlue \cite{sarlin2019superglue} matching network, etc. There have been attempts to formulate the full pipeline solving problem like SLAM \cite{gradslam2020} in a differentiable way, combining the advantages of structured and learning-based approaches. 
![](00_intro_files/att_00011.png "SuperGlue: separate matching module for handcrafter and learned features") ![](00_intro_files/gradslam.png "gradSLAM: differentiable formulation of SLAM pipeline") ### Wide baseline stereo as a idea: consensus of local independent predictions On the other hand, as an algorithm, wide baseline stereo is summarized into two main ideas 1. Image should be represented as set of local parts, robust to occlusion, and not influencing each other. 2. Decision should be based on spatial consensus of local feature correspondences. One of modern revisit of wide baseline stereo ideas is Capsule Networks\cite{CapsNet2011,CapsNet2017}. Unlike vanilla CNNs, capsule networks encode not only the intensity of feature response, but also its location. Geometric agreement between "object parts" is a requirement for outputing a confident prediction. Similar ideas are now explored for ensuring adversarial robustness of CNNs\cite{li2020extreme}. Another way of using "consensus of local independent predictions" is used in [Cross-transformers](https://arxiv.org/abs/2007.11498) paper: spatial attention helps to select relevant feature for few-shot learning, see Figure below. While wide multiple baseline stereo is a mature field now and does not attract even nearly as much attention as before, it continues to play an important role in computer vision. ![](2020-03-27-intro_files/att_00000.png "Cross-transformers: spatial attention helps to select relevant feature for few-shot learning") ![](00_intro_files/capsules.png "Capsule networks: revisiting the WBS idea. Each feature response is accompanied with its pose. Poses should be in agreement, otherwise object would not be recognized. Image by Aurélien Géron https://www.oreilly.com/content/introducing-capsule-networks/") # References [<a id="cit-Hannah1974ComputerMO" href="#call-Hannah1974ComputerMO">Hannah1974ComputerMO</a>] M. J., ``_Computer matching of areas in stereo images._'', 1974. [<a id="cit-Moravec1980" href="#call-Moravec1980">Moravec1980</a>] Hans Peter Moravec, ``_Obstacle Avoidance and Navigation in the Real World by a Seeing Robot Rover_'', 1980. [<a id="cit-Hartley2004" href="#call-Hartley2004">Hartley2004</a>] R.~I. Hartley and A. Zisserman, ``_Multiple View Geometry in Computer Vision_'', 2004. [<a id="cit-Schmid1995" href="#call-Schmid1995">Schmid1995</a>] Schmid Cordelia and Mohr Roger, ``_Matching by local invariants_'', , vol. , number , pp. , 1995. [online](https://hal.inria.fr/file/index/docid/74046/filename/RR-2644.pdf) [<a id="cit-Harris88" href="#call-Harris88">Harris88</a>] C. Harris and M. Stephens, ``_A Combined Corner and Edge Detector_'', Fourth Alvey Vision Conference, 1988. [<a id="cit-forstner1987fast" href="#call-forstner1987fast">forstner1987fast</a>] W. F{\"o}rstner and E. G{\"u}lch, ``_A fast operator for detection and precise location of distinct points, corners and centres of circular features_'', Proc. ISPRS intercommission conference on fast processing of photogrammetric data, 1987. [<a id="cit-Hessian78" href="#call-Hessian78">Hessian78</a>] P.R. Beaudet, ``_Rotationally invariant image operators_'', Proceedings of the 4th International Joint Conference on Pattern Recognition, 1978. [<a id="cit-Beardsley96" href="#call-Beardsley96">Beardsley96</a>] P. Beardsley, P. Torr and A. Zisserman, ``_3D model acquisition from extended image sequences_'', ECCV, 1996. [<a id="cit-RANSAC1981" href="#call-RANSAC1981">RANSAC1981</a>] Fischler Martin A. 
and Bolles Robert C., ``_Random sample consensus: a paradigm for model fitting with applications to image analysis and automated cartography_'', Commun. ACM, vol. 24, number 6, pp. 381--395, jun 1981. [<a id="cit-Pritchett1998" href="#call-Pritchett1998">Pritchett1998</a>] P. Pritchett and A. Zisserman, ``_Wide baseline stereo matching_'', ICCV, 1998. [<a id="cit-Pritchett1998b" href="#call-Pritchett1998b">Pritchett1998b</a>] P. Pritchett and A. Zisserman, ``_"Matching and Reconstruction from Widely Separated Views"_'', 3D Structure from Multiple Images of Large-Scale Environments, 1998. [<a id="cit-WBSTorr99" href="#call-WBSTorr99">WBSTorr99</a>] P. Torr and A. Zisserman, ``_Feature Based Methods for Structure and Motion Estimation_'', Workshop on Vision Algorithms, 1999. [<a id="cit-CsurkaReview2018" href="#call-CsurkaReview2018">CsurkaReview2018</a>] {Csurka} Gabriela, {Dance} Christopher R. and {Humenberger} Martin, ``_From handcrafted to deep local features_'', arXiv e-prints, vol. , number , pp. , 2018. [<a id="cit-IMW2020" href="#call-IMW2020">IMW2020</a>] Jin Yuhe, Mishkin Dmytro, Mishchuk Anastasiia <em>et al.</em>, ``_Image Matching across Wide Baselines: From Paper to Practice_'', arXiv preprint arXiv:2003.01587, vol. , number , pp. , 2020. [<a id="cit-Lindeberg1993" href="#call-Lindeberg1993">Lindeberg1993</a>] Lindeberg Tony, ``_Detecting Salient Blob-like Image Structures and Their Scales with a Scale-space Primal Sketch: A Method for Focus-of-attention_'', Int. J. Comput. Vision, vol. 11, number 3, pp. 283--318, December 1993. [<a id="cit-Lindeberg1998" href="#call-Lindeberg1998">Lindeberg1998</a>] Lindeberg Tony, ``_Feature Detection with Automatic Scale Selection_'', Int. J. Comput. Vision, vol. 30, number 2, pp. 79--116, November 1998. [<a id="cit-lindeberg2013scale" href="#call-lindeberg2013scale">lindeberg2013scale</a>] Lindeberg Tony, ``_Scale-space theory in computer vision_'', , vol. 256, number , pp. , 2013. [<a id="cit-Tuytelaars2008" href="#call-Tuytelaars2008">Tuytelaars2008</a>] Tuytelaars Tinne and Mikolajczyk Krystian, ``_Local Invariant Feature Detectors: A Survey_'', Found. Trends. Comput. Graph. Vis., vol. 3, number 3, pp. 177--280, July 2008. [<a id="cit-Lowe99" href="#call-Lowe99">Lowe99</a>] D. Lowe, ``_Object Recognition from Local Scale-Invariant Features_'', ICCV, 1999. [<a id="cit-SIFT2004" href="#call-SIFT2004">SIFT2004</a>] Lowe David G., ``_Distinctive Image Features from Scale-Invariant Keypoints_'', International Journal of Computer Vision (IJCV), vol. 60, number 2, pp. 91--110, 2004. [<a id="cit-QuadInterp2002" href="#call-QuadInterp2002">QuadInterp2002</a>] M. Brown and D. Lowe, ``_Invariant Features from Interest Point Groups_'', BMVC, 2002. [<a id="cit-aknn1997" href="#call-aknn1997">aknn1997</a>] J.S. Beis and D.G. Lowe, ``_Shape Indexing Using Approximate Nearest-Neighbour Search in High-Dimensional Spaces_'', CVPR, 1997. [<a id="cit-MikoDescEval2003" href="#call-MikoDescEval2003">MikoDescEval2003</a>] K. Mikolajczyk and C. Schmid, ``_A Performance Evaluation of Local Descriptors_'', CVPR, June 2003. [<a id="cit-Mikolajczyk05" href="#call-Mikolajczyk05">Mikolajczyk05</a>] Mikolajczyk K., Tuytelaars T., Schmid C. <em>et al.</em>, ``_A Comparison of Affine Region Detectors_'', IJCV, vol. 65, number 1/2, pp. 43--72, 2005. [<a id="cit-LOransac2003" href="#call-LOransac2003">LOransac2003</a>] O. Chum, J. Matas and J. Kittler, ``_Locally Optimized RANSAC_'', Pattern Recognition, 2003. 
[<a id="cit-PROSAC2005" href="#call-PROSAC2005">PROSAC2005</a>] O. Chum and J. Matas, ``_Matching with PROSAC -- Progressive Sample Consensus_'', Proceedings of the 2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05) - Volume 1 - Volume 01, 2005. [<a id="cit-Degensac2005" href="#call-Degensac2005">Degensac2005</a>] O. Chum, T. Werner and J. Matas, ``_Two-View Geometry Estimation Unaffected by a Dominant Plane_'', CVPR, 2005. [<a id="cit-RANSACSurvey2009" href="#call-RANSACSurvey2009">RANSACSurvey2009</a>] S. Choi, T. Kim and W. Yu, ``_Performance Evaluation of RANSAC Family._'', BMVC, 2009. [<a id="cit-VideoGoogle2003" href="#call-VideoGoogle2003">VideoGoogle2003</a>] J. Sivic and A. Zisserman, ``_Video Google: A Text Retrieval Approach to Object Matching in Videos_'', ICCV, 2003. [<a id="cit-Philbin07" href="#call-Philbin07">Philbin07</a>] J. Philbin, O. Chum, M. Isard <em>et al.</em>, ``_Object Retrieval with Large Vocabularies and Fast Spatial Matching_'', CVPR, 2007. [<a id="cit-Fergus03" href="#call-Fergus03">Fergus03</a>] R. Fergus, P. Perona and A. Zisserman, ``_Object Class Recognition by Unsupervised Scale-Invariant Learning_'', CVPR, 2003. [<a id="cit-CsurkaBoK2004" href="#call-CsurkaBoK2004">CsurkaBoK2004</a>] C.D. G. Csurka, J. Willamowski, L. Fan <em>et al.</em>, ``_Visual Categorization with Bags of Keypoints_'', ECCV, 2004. [<a id="cit-Lazebnik06" href="#call-Lazebnik06">Lazebnik06</a>] S. Lazebnik, C. Schmid and J. Ponce, ``_Beyond Bags of Features: Spatial Pyramid Matching for Recognizing Natural Scene Categories_'', CVPR, 2006. [<a id="cit-Chum2007Exemplar" href="#call-Chum2007Exemplar">Chum2007Exemplar</a>] O. {Chum} and A. {Zisserman}, ``_An Exemplar Model for Learning Object Classes_'', CVPR, 2007. [<a id="cit-HoG2005" href="#call-HoG2005">HoG2005</a>] N. {Dalal} and B. {Triggs}, ``_Histograms of oriented gradients for human detection_'', CVPR, 2005. [<a id="cit-Superparsing2010" href="#call-Superparsing2010">Superparsing2010</a>] J. Tighe and S. Lazebnik, ``_SuperParsing: Scalable Nonparametric Image Parsing with Superpixels_'', ECCV, 2010. [<a id="cit-PhotoTourism2006" href="#call-PhotoTourism2006">PhotoTourism2006</a>] Snavely Noah, Seitz Steven M. and Szeliski Richard, ``_Photo Tourism: Exploring Photo Collections in 3D_'', ToG, vol. 25, number 3, pp. 835–846, 2006. [<a id="cit-RomeInDay2009" href="#call-RomeInDay2009">RomeInDay2009</a>] Agarwal Sameer, Furukawa Yasutaka, Snavely Noah <em>et al.</em>, ``_Building Rome in a day_'', Communications of the ACM, vol. 54, number , pp. 105--112, 2011. [<a id="cit-COLMAP2016" href="#call-COLMAP2016">COLMAP2016</a>] J. Sch\"{o}nberger and J. Frahm, ``_Structure-From-Motion Revisited_'', CVPR, 2016. [<a id="cit-Se02" href="#call-Se02">Se02</a>] Se S., G. D. and Little J., ``_Mobile Robot Localization and Mapping with Uncertainty Using Scale-Invariant Visual Landmarks_'', IJRR, vol. 22, number 8, pp. 735--758, 2002. [<a id="cit-PTAM2007" href="#call-PTAM2007">PTAM2007</a>] G. {Klein} and D. {Murray}, ``_Parallel Tracking and Mapping for Small AR Workspaces_'', IEEE and ACM International Symposium on Mixed and Augmented Reality, 2007. [<a id="cit-Mur15" href="#call-Mur15">Mur15</a>] Mur-Artal R., Montiel J. and Tard{\'o}s J., ``_ORB-Slam: A Versatile and Accurate Monocular Slam System_'', IEEE Transactions on Robotics, vol. 31, number 5, pp. 1147--1163, 2015. [<a id="cit-Brown07" href="#call-Brown07">Brown07</a>] Brown M. 
and Lowe D., ``_Automatic Panoramic Image Stitching Using Invariant Features_'', IJCV, vol. 74, number , pp. 59--73, 2007. [<a id="cit-DualBootstrap2003" href="#call-DualBootstrap2003">DualBootstrap2003</a>] V. C., Tsai} {Chia-Ling and {Roysam} B., ``_The dual-bootstrap iterative closest point algorithm with application to retinal image registration_'', IEEE Transactions on Medical Imaging, vol. 22, number 11, pp. 1379-1394, 2003. [<a id="cit-AlexNet2012" href="#call-AlexNet2012">AlexNet2012</a>] Alex Krizhevsky, Ilya Sutskever and Geoffrey E., ``_ImageNet Classification with Deep Convolutional Neural Networks_'', 2012. [<a id="cit-Astounding2014" href="#call-Astounding2014">Astounding2014</a>] A. S., H. {Azizpour}, J. {Sullivan} <em>et al.</em>, ``_CNN Features Off-the-Shelf: An Astounding Baseline for Recognition_'', CVPRW, 2014. [<a id="cit-Melekhov2017relativePoseCnn" href="#call-Melekhov2017relativePoseCnn">Melekhov2017relativePoseCnn</a>] I. Melekhov, J. Ylioinas, J. Kannala <em>et al.</em>, ``_Relative Camera Pose Estimation Using Convolutional Neural Networks_'', , 2017. [online](https://arxiv.org/abs/1702.01381) [<a id="cit-PoseNet2015" href="#call-PoseNet2015">PoseNet2015</a>] A. Kendall, M. Grimes and R. Cipolla, ``_PoseNet: A Convolutional Network for Real-Time 6-DOF Camera Relocalization_'', ICCV, 2015. [<a id="cit-sattler2019understanding" href="#call-sattler2019understanding">sattler2019understanding</a>] T. Sattler, Q. Zhou, M. Pollefeys <em>et al.</em>, ``_Understanding the limitations of cnn-based absolute camera pose regression_'', CVPR, 2019. [<a id="cit-zhou2019learn" href="#call-zhou2019learn">zhou2019learn</a>] Q. Zhou, T. Sattler, M. Pollefeys <em>et al.</em>, ``_To Learn or Not to Learn: Visual Localization from Essential Matrices_'', ICRA, 2020. [<a id="cit-pion2020benchmarking" href="#call-pion2020benchmarking">pion2020benchmarking</a>] !! _This reference was not found in biblio.bib _ !! [<a id="cit-Tatarchenko2019" href="#call-Tatarchenko2019">Tatarchenko2019</a>] M. Tatarchenko, S.R. Richter, R. Ranftl <em>et al.</em>, ``_What Do Single-View 3D Reconstruction Networks Learn?_'', CVPR, 2019. [<a id="cit-STN2015" href="#call-STN2015">STN2015</a>] M. Jaderberg, K. Simonyan and A. Zisserman, ``_Spatial transformer networks_'', NeurIPS, 2015. [<a id="cit-NALU2018" href="#call-NALU2018">NALU2018</a>] A. Trask, F. Hill, S.E. Reed <em>et al.</em>, ``_Neural arithmetic logic units_'', NeurIPS, 2018. [<a id="cit-NAU2020" href="#call-NAU2020">NAU2020</a>] A. Madsen and A. Rosenberg, ``_Neural Arithmetic Units_'', ICLR, 2020. [<a id="cit-GroupEqCNN2016" href="#call-GroupEqCNN2016">GroupEqCNN2016</a>] T. Cohen and M. Welling, ``_Group equivariant convolutional networks_'', ICML, 2016. [<a id="cit-MakeCNNShiftInvariant2019" href="#call-MakeCNNShiftInvariant2019">MakeCNNShiftInvariant2019</a>] R. Zhang, ``_Making convolutional networks shift-invariant again_'', ICML, 2019. [<a id="cit-AbsPositionCNN2020" href="#call-AbsPositionCNN2020">AbsPositionCNN2020</a>] M. Amirul, S. Jia and N. D., ``_How Much Position Information Do Convolutional Neural Networks Encode?_'', ICLR, 2020. [<a id="cit-AdvPatch2017" href="#call-AdvPatch2017">AdvPatch2017</a>] T. Brown, D. Mane, A. Roy <em>et al.</em>, ``_Adversarial patch_'', NeurIPSW, 2017. 
[<a id="cit-OnePixelAttack2019" href="#call-OnePixelAttack2019">OnePixelAttack2019</a>] Su Jiawei, Vargas Danilo Vasconcellos and Sakurai Kouichi, ``_One pixel attack for fooling deep neural networks_'', IEEE Transactions on Evolutionary Computation, vol. 23, number 5, pp. 828--841, 2019. [<a id="cit-cv4action2019" href="#call-cv4action2019">cv4action2019</a>] Zhou Brady, Kr{\"a}henb{\"u}hl Philipp and Koltun Vladlen, ``_Does computer vision matter for action?_'', Science Robotics, vol. 4, number 30, pp. , 2019. [<a id="cit-HardNet2017" href="#call-HardNet2017">HardNet2017</a>] A. Mishchuk, D. Mishkin, F. Radenovic <em>et al.</em>, ``_Working Hard to Know Your Neighbor's Margins: Local Descriptor Learning Loss_'', NeurIPS, 2017. [<a id="cit-KeyNet2019" href="#call-KeyNet2019">KeyNet2019</a>] A. Barroso-Laguna, E. Riba, D. Ponsa <em>et al.</em>, ``_Key.Net: Keypoint Detection by Handcrafted and Learned CNN Filters_'', ICCV, 2019. [<a id="cit-SuperPoint2017" href="#call-SuperPoint2017">SuperPoint2017</a>] Detone D., Malisiewicz T. and Rabinovich A., ``_Superpoint: Self-Supervised Interest Point Detection and Description_'', CVPRW Deep Learning for Visual SLAM, vol. , number , pp. , 2018. [<a id="cit-R2D22019" href="#call-R2D22019">R2D22019</a>] J. Revaud, ``_R2D2: Repeatable and Reliable Detector and Descriptor_'', NeurIPS, 2019. [<a id="cit-D2Net2019" href="#call-D2Net2019">D2Net2019</a>] M. Dusmanu, I. Rocco, T. Pajdla <em>et al.</em>, ``_D2-Net: A Trainable CNN for Joint Detection and Description of Local Features_'', CVPR, 2019. [<a id="cit-sarlin2019superglue" href="#call-sarlin2019superglue">sarlin2019superglue</a>] P. Sarlin, D. DeTone, T. Malisiewicz <em>et al.</em>, ``_SuperGlue: Learning Feature Matching with Graph Neural Networks_'', CVPR, 2020. [<a id="cit-gradslam2020" href="#call-gradslam2020">gradslam2020</a>] J. Krishna Murthy, G. Iyer and L. Paull, ``_gradSLAM: Dense SLAM meets Automatic Differentiation _'', ICRA, 2020 . [<a id="cit-CapsNet2011" href="#call-CapsNet2011">CapsNet2011</a>] G.E. Hinton, A. Krizhevsky and S.D. Wang, ``_Transforming auto-encoders_'', ICANN, 2011. [<a id="cit-CapsNet2017" href="#call-CapsNet2017">CapsNet2017</a>] S. Sabour, N. Frosst and G.E. Hinton, ``_Dynamic routing between capsules_'', NeurIPS, 2017. [<a id="cit-li2020extreme" href="#call-li2020extreme">li2020extreme</a>] Li Jianguo, Sun Mingjie and Zhang Changshui, ``_Extreme Values are Accurate and Robust in Deep Networks_'', , vol. , number , pp. , 2020. [online](https://openreview.net/forum?id=H1gHb1rFwr)
github_jupyter
# Plotting with [cartopy](https://scitools.org.uk/cartopy/docs/latest/) From Cartopy website: * Cartopy is a Python package designed for geospatial data processing in order to produce maps and other geospatial data analyses. * Cartopy makes use of the powerful PROJ.4, NumPy and Shapely libraries and includes a programmatic interface built on top of Matplotlib for the creation of publication quality maps. * Key features of cartopy are its object oriented projection definitions, and its ability to transform points, lines, vectors, polygons and images between those projections. * You will find cartopy especially useful for large area / small scale data, where Cartesian assumptions of spherical data traditionally break down. If you’ve ever experienced a singularity at the pole or a cut-off at the dateline, it is likely you will appreciate cartopy’s unique features! ``` import numpy as np import matplotlib.pyplot as plt import xarray as xr import cartopy.crs as ccrs ``` # Read in data using xarray - Read in the Saildrone USV file either from a local disc `xr.open_dataset(file)` - change latitude and longitude to lat and lon `.rename({'longitude':'lon','latitude':'lat'})` ``` file = '../data/saildrone-gen_5-antarctica_circumnavigation_2019-sd1020-20190119T040000-20190803T043000-1440_minutes-v1.1564857794963.nc' ds_usv = ``` # Open the dataset, mask land, plot result * `ds_sst = xr.open_dataset(url)` * use `ds_sst = ds_sst.where(ds_sst.mask==1)` to mask values equal to 1 ``` #If you are offline use the first url #url = '../data/20111101120000-CMC-L4_GHRSST-SSTfnd-CMC0.2deg-GLOB-v02.0-fv02.0.nc' url = 'https://podaac-opendap.jpl.nasa.gov/opendap/allData/ghrsst/data/GDS2/L4/GLOB/CMC/CMC0.2deg/v2/2011/305/20111101120000-CMC-L4_GHRSST-SSTfnd-CMC0.2deg-GLOB-v02.0-fv02.0.nc' ``` ## explore the in situ data and quickly plot using cartopy * first set up the axis with the projection you want: https://scitools.org.uk/cartopy/docs/latest/crs/projections.html * plot to that axis and tell the projection that your data is in #### Run the cell below and see what the image looks like. Then try adding in the lines below, one at a time, and re-run cell to see what happens * set a background image `ax.stock_img()` * draw coastlines `ax.coastlines(resolution='50m')` * add a colorbary and label it `cax = plt.colorbar(cs1)` `cax.set_label('SST (K)')` ``` #for polar data, plot temperature datamin = 0 datamax = 12 ax = plt.axes(projection=ccrs.SouthPolarStereo()) #here is where you set your axis projection (ds_sst.analysed_sst-273.15).plot(ax=ax, transform=ccrs.PlateCarree(), #set data projection vmin=datamin, #data min vmax=datamax) #data min cs1 = ax.scatter(ds_usv.lon, ds_usv.lat, transform=ccrs.PlateCarree(), #set data projection s=10.0, #size for scatter point c=ds_usv.TEMP_CTD_MEAN, #make the color of the scatter point equal to the USV temperature edgecolor='none', #no edgecolor cmap='jet', #colormap vmin=datamin, #data min vmax=datamax) #data max ax.set_extent([-180, 180, -90, -45], crs=ccrs.PlateCarree()) #data projection ``` # Plot the salinity * Take the code from above but use `c=ds_usv.SAL_MEAN` * Run the code, what looks wrong? 
* Change `datamin` and `datamax` ``` ``` # Let's plot some data off California * Read in data from a cruise along the California / Baja Coast * `ds_usv = xr.open_dataset(url).rename({'longitude':'lon','latitude':'lat'})` ``` #use the first URL if you are offline #url = '../data/saildrone-gen_4-baja_2018-sd1002-20180411T180000-20180611T055959-1_minutes-v1.nc' url = 'https://podaac-opendap.jpl.nasa.gov/opendap/hyrax/allData/insitu/L2/saildrone/Baja/saildrone-gen_4-baja_2018-sd1002-20180411T180000-20180611T055959-1_minutes-v1.nc' ``` * Plot the data using the code from above, but change the projection `ax = plt.axes(projection=ccrs.PlateCarree())` ``` ``` * Zoom into the region of the cruise * First calculate the lat/lon box<br> `lonmin,lonmax = ds_usv.lon.min().data-2,ds_usv.lon.max().data+2`<br> `latmin,latmax = ds_usv.lat.min().data-2,ds_usv.lat.max().data+2` * Then, after plotting the data, change the extent `ax.set_extent([lonmin,lonmax,latmin,latmax], crs=ccrs.PlateCarree())`
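If you want to check your work on the last exercise, one possible solution sketch is below. It assumes the saildrone temperature variable in the Baja file is again called `TEMP_CTD_MEAN`; if the name differs, check `ds_usv.data_vars` and adjust.

```
# One possible solution sketch for the Baja exercise
# (assumes ds_usv has been read in with lat/lon renamed as above)
lonmin, lonmax = ds_usv.lon.min().data - 2, ds_usv.lon.max().data + 2
latmin, latmax = ds_usv.lat.min().data - 2, ds_usv.lat.max().data + 2

ax = plt.axes(projection=ccrs.PlateCarree())     # axis projection
ax.coastlines(resolution='50m')                  # draw coastlines for context
cs1 = ax.scatter(ds_usv.lon, ds_usv.lat,
                 transform=ccrs.PlateCarree(),   # data projection
                 s=10.0,
                 c=ds_usv.TEMP_CTD_MEAN,         # color by USV temperature
                 edgecolor='none',
                 cmap='jet')
cax = plt.colorbar(cs1)
cax.set_label('Temperature (C)')
ax.set_extent([lonmin, lonmax, latmin, latmax], crs=ccrs.PlateCarree())
```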
github_jupyter
# Gazebo proxy The Gazebo proxy is an implementation of interfaces with all services provided by the `gazebo_ros_pkgs`. It allows easy use and from of the simulation through Python. It can be configured for different `ROS_MASTER_URI` and `GAZEBO_MASTER_URI` environment variables to access instances of Gazebo running in other hosts/ports. The tutorial below will make use of the simulation manager to start instances of Gazebo. ``` # Importing the Gazebo proxy from pcg_gazebo.task_manager import GazeboProxy ``` The Gazebo proxy may also work with an instance of Gazebo that has been started external to the scope of this package, for example by running ``` roslaunch gazebo_ros empty_world.launch ``` The only instance will be found by using the input hostname and ports for which they are running. Here we will use the simulation manager. ``` # If there is a Gazebo instance running, you can spawn the box into the simulation from pcg_gazebo.task_manager import Server # First create a simulation server server = Server() # Create a simulation manager named default server.create_simulation('default') simulation = server.get_simulation('default') # Run an instance of the empty.world scenario # This is equivalent to run # roslaunch gazebo_ros empty_world.launch # with all default parameters if not simulation.create_gazebo_empty_world_task(): raise RuntimeError('Task for gazebo empty world could not be created') # A task named 'gazebo' the added to the tasks list print(simulation.get_task_list()) # But it is still not running print('Is Gazebo running: {}'.format(simulation.is_task_running('gazebo'))) # Run Gazebo simulation.run_all_tasks() ``` Adding some models to the simulation to demonstrate the Gazebo proxy methods. ``` # Now create the Gazebo proxy with the default parameters. # If these input arguments are not provided, they will be used per default. gazebo_proxy = simulation.get_gazebo_proxy() # The timeout argument will be used raise an exception in case Gazebo # fails to start from pcg_gazebo.simulation import create_object from pcg_gazebo.generators import WorldGenerator generator = WorldGenerator(gazebo_proxy=gazebo_proxy) box = create_object('box') box.add_inertial(mass=20) print(box.to_sdf('model')) generator.spawn_model( model=box, robot_namespace='box_1', pos=[-2, -2, 3]) generator.spawn_model( model=box, robot_namespace='box_2', pos=[2, 2, 3]) ``` ## Pausing/unpausing the simulation ``` from time import time, sleep pause_timeout = 10 # seconds start_time = time() # Pausing simulation gazebo_proxy.pause() print('Simulation time before pause={}'.format(gazebo_proxy.sim_time)) while time() - start_time < pause_timeout: print('Gazebo paused, simulation time={}'.format(gazebo_proxy.sim_time)) sleep(1) print('Unpausing simulation!') gazebo_proxy.unpause() sleep(2) print('Simulation time after pause={}'.format(gazebo_proxy.sim_time)) ``` ## Get world properties The world properties return * Simulation time (`sim_time`) * List of names of models (`model_names`) * Is rendering enabled flag (`rendering_enabled`) The return of this function is simply the service object [`GetWorldProperties`](https://github.com/ros-simulation/gazebo_ros_pkgs/blob/kinetic-devel/gazebo_msgs/srv/GetWorldProperties.srv). 
``` # The world properties returns the following gazebo_proxy.get_world_properties() ``` ## Model properties ``` # Get list of models gazebo_proxy.get_model_names() # Get model properties for model in gazebo_proxy.get_model_names(): print(model) print(gazebo_proxy.get_model_properties(model)) print('-----------------') # Get model state for model in gazebo_proxy.get_model_names(): print(model) print(gazebo_proxy.get_model_state(model_name=model, reference_frame='world')) print('-----------------') # Check if model exists print('Does ground_plane exist? {}'.format(gazebo_proxy.model_exists('ground_plane'))) print('Does my_model exist? {}'.format(gazebo_proxy.model_exists('my_model'))) # Get list of link names for a model for model in gazebo_proxy.get_model_names(): print(model) print(gazebo_proxy.get_link_names(model)) print('-----------------') # Test if model has a link print('Does ground_plane have a link named link? {}'.format(gazebo_proxy.has_link(model_name='ground_plane', link_name='link'))) # Get link properties for model in gazebo_proxy.get_model_names(): print(model) for link in gazebo_proxy.get_link_names(model_name=model): print(' - ' + link) print(gazebo_proxy.get_link_properties(model_name=model, link_name=link)) print('-----------------') print('==================') # Get link state for model in gazebo_proxy.get_model_names(): print(model) for link in gazebo_proxy.get_link_names(model_name=model): print(' - ' + link) print(gazebo_proxy.get_link_state(model_name=model, link_name=link)) print('-----------------') print('==================') ``` ## Get physics properties The physics properties returns the [GetPhysicsProperties](https://github.com/ros-simulation/gazebo_ros_pkgs/blob/kinetic-devel/gazebo_msgs/srv/GetPhysicsProperties.srv) response with the current parameters for the physics engine. Currently only the parameters for the ODE engine can be retrieved. ``` print(gazebo_proxy.get_physics_properties()) ``` ## Apply wrench ``` # Applying wrench to a link in the simulation # The input arguments are # - model_name # - link_name # - force: force vector [x, y, z] # - torque: torque vector [x, y, z] # - start_time: in seconds, if it is a value lower than simulation time, the wrench will be applied as soon as possible # - duration: in seconds # if duration < 0, apply wrench continuously without end # if duration = 0, do nothing # if duration < step size, apply wrench for one step size # - reference_point: [x, y, z] coordinate point where wrench will be applied wrt the reference frame # - reference_frame: reference frame for the reference point, if None it will be set as the provided model_name::link_name gazebo_proxy.apply_body_wrench( model_name='box_1', link_name='box', force=[100, 0, 0], torque=[0, 0, 100], start_time=0, duration=5, reference_point=[0, 0, 0], reference_frame=None) gazebo_proxy.apply_body_wrench( model_name='box_2', link_name='box', force=[10, 0, 200], torque=[0, 0, 150], start_time=0, duration=4, reference_point=[0, 0, 0], reference_frame=None) start_time = time() while time() - start_time < 10: sleep(1) ``` ## Move models in the simulation ``` gazebo_proxy.move_model( model_name='box_1', pos=[2, 2, 15], rot=[0, 0, 0], reference_frame='world') gazebo_proxy.move_model( model_name='box_2', pos=[-2, -1, 4], rot=[0, 0, 0], reference_frame='world') # End the simulation by killing the Gazebo task simulation.kill_all_tasks() ```
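As a final note, the polling pattern below can be handy when spawning models programmatically: it simply waits until a model shows up in the world before continuing. This helper is an addition for illustration, not part of the `pcg_gazebo` API; it only uses methods already demonstrated above (`model_exists` and `get_model_state`) and must of course be called while the Gazebo tasks are still running, i.e. before `kill_all_tasks`.

```
from time import time, sleep

def wait_for_model(gazebo_proxy, model_name, timeout=30.0, poll_period=0.5):
    # Poll the simulation until the model appears or the timeout expires.
    # Uses only gazebo_proxy.model_exists and gazebo_proxy.get_model_state,
    # both demonstrated earlier in this notebook.
    start_time = time()
    while time() - start_time < timeout:
        if gazebo_proxy.model_exists(model_name):
            return gazebo_proxy.get_model_state(model_name=model_name, reference_frame='world')
        sleep(poll_period)
    return None

# Example usage (only valid while the Gazebo tasks are still running):
# state = wait_for_model(gazebo_proxy, 'box_1')
# print(state)
```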
github_jupyter
``` import tensorflow as tf import numpy as np from copy import deepcopy epoch = 20 batch_size = 64 size_layer = 64 dropout_rate = 0.5 n_hops = 2 class BaseDataLoader(): def __init__(self): self.data = { 'size': None, 'val':{ 'inputs': None, 'questions': None, 'answers': None,}, 'len':{ 'inputs_len': None, 'inputs_sent_len': None, 'questions_len': None, 'answers_len': None} } self.vocab = { 'size': None, 'word2idx': None, 'idx2word': None, } self.params = { 'vocab_size': None, '<start>': None, '<end>': None, 'max_input_len': None, 'max_sent_len': None, 'max_quest_len': None, 'max_answer_len': None, } class DataLoader(BaseDataLoader): def __init__(self, path, is_training, vocab=None, params=None): super().__init__() data, lens = self.load_data(path) if is_training: self.build_vocab(data) else: self.demo = data self.vocab = vocab self.params = deepcopy(params) self.is_training = is_training self.padding(data, lens) def load_data(self, path): data, lens = bAbI_data_load(path) self.data['size'] = len(data[0]) return data, lens def build_vocab(self, data): signals = ['<pad>', '<unk>', '<start>', '<end>'] inputs, questions, answers = data i_words = [w for facts in inputs for fact in facts for w in fact if w != '<end>'] q_words = [w for question in questions for w in question] a_words = [w for answer in answers for w in answer if w != '<end>'] words = list(set(i_words + q_words + a_words)) self.params['vocab_size'] = len(words) + 4 self.params['<start>'] = 2 self.params['<end>'] = 3 self.vocab['word2idx'] = {word: idx for idx, word in enumerate(signals + words)} self.vocab['idx2word'] = {idx: word for word, idx in self.vocab['word2idx'].items()} def padding(self, data, lens): inputs_len, inputs_sent_len, questions_len, answers_len = lens self.params['max_input_len'] = max(inputs_len) self.params['max_sent_len'] = max([fact_len for batch in inputs_sent_len for fact_len in batch]) self.params['max_quest_len'] = max(questions_len) self.params['max_answer_len'] = max(answers_len) self.data['len']['inputs_len'] = np.array(inputs_len) for batch in inputs_sent_len: batch += [0] * (self.params['max_input_len'] - len(batch)) self.data['len']['inputs_sent_len'] = np.array(inputs_sent_len) self.data['len']['questions_len'] = np.array(questions_len) self.data['len']['answers_len'] = np.array(answers_len) inputs, questions, answers = deepcopy(data) for facts in inputs: for sentence in facts: for i in range(len(sentence)): sentence[i] = self.vocab['word2idx'].get(sentence[i], self.vocab['word2idx']['<unk>']) sentence += [0] * (self.params['max_sent_len'] - len(sentence)) paddings = [0] * self.params['max_sent_len'] facts += [paddings] * (self.params['max_input_len'] - len(facts)) for question in questions: for i in range(len(question)): question[i] = self.vocab['word2idx'].get(question[i], self.vocab['word2idx']['<unk>']) question += [0] * (self.params['max_quest_len'] - len(question)) for answer in answers: for i in range(len(answer)): answer[i] = self.vocab['word2idx'].get(answer[i], self.vocab['word2idx']['<unk>']) self.data['val']['inputs'] = np.array(inputs) self.data['val']['questions'] = np.array(questions) self.data['val']['answers'] = np.array(answers) def bAbI_data_load(path, END=['<end>']): inputs = [] questions = [] answers = [] inputs_len = [] inputs_sent_len = [] questions_len = [] answers_len = [] for d in open(path): index = d.split(' ')[0] if index == '1': fact = [] if '?' 
in d: temp = d.split('\t') q = temp[0].strip().replace('?', '').split(' ')[1:] + ['?'] a = temp[1].split() + END fact_copied = deepcopy(fact) inputs.append(fact_copied) questions.append(q) answers.append(a) inputs_len.append(len(fact_copied)) inputs_sent_len.append([len(s) for s in fact_copied]) questions_len.append(len(q)) answers_len.append(len(a)) else: tokens = d.replace('.', '').replace('\n', '').split(' ')[1:] + END fact.append(tokens) return [inputs, questions, answers], [inputs_len, inputs_sent_len, questions_len, answers_len] train_data = DataLoader(path='qa5_three-arg-relations_train.txt',is_training=True) test_data = DataLoader(path='qa5_three-arg-relations_test.txt',is_training=False, vocab=train_data.vocab, params=train_data.params) START = train_data.params['<start>'] END = train_data.params['<end>'] def hop_forward(question, memory_o, memory_i, response_proj, inputs_len, questions_len, is_training): match = tf.matmul(question, memory_i, transpose_b=True) match = pre_softmax_masking(match, inputs_len) match = tf.nn.softmax(match) match = post_softmax_masking(match, questions_len) response = tf.matmul(match, memory_o) return response_proj(tf.concat([response, question], -1)) def pre_softmax_masking(x, seq_len): paddings = tf.fill(tf.shape(x), float('-inf')) T = tf.shape(x)[1] max_seq_len = tf.shape(x)[2] masks = tf.sequence_mask(seq_len, max_seq_len, dtype=tf.float32) masks = tf.tile(tf.expand_dims(masks, 1), [1, T, 1]) return tf.where(tf.equal(masks, 0), paddings, x) def post_softmax_masking(x, seq_len): T = tf.shape(x)[2] max_seq_len = tf.shape(x)[1] masks = tf.sequence_mask(seq_len, max_seq_len, dtype=tf.float32) masks = tf.tile(tf.expand_dims(masks, -1), [1, 1, T]) return (x * masks) def shift_right(x): batch_size = tf.shape(x)[0] start = tf.to_int32(tf.fill([batch_size, 1], START)) return tf.concat([start, x[:, :-1]], 1) def embed_seq(x, vocab_size, zero_pad=True): lookup_table = tf.get_variable('lookup_table', [vocab_size, size_layer], tf.float32) if zero_pad: lookup_table = tf.concat((tf.zeros([1, size_layer]), lookup_table[1:, :]), axis=0) return tf.nn.embedding_lookup(lookup_table, x) def position_encoding(sentence_size, embedding_size): encoding = np.ones((embedding_size, sentence_size), dtype=np.float32) ls = sentence_size + 1 le = embedding_size + 1 for i in range(1, le): for j in range(1, ls): encoding[i-1, j-1] = (i - (le-1)/2) * (j - (ls-1)/2) encoding = 1 + 4 * encoding / embedding_size / sentence_size return tf.convert_to_tensor(np.transpose(encoding)) def input_mem(x, vocab_size, max_sent_len, is_training): x = embed_seq(x, vocab_size) x = tf.layers.dropout(x, dropout_rate, training=is_training) pos = position_encoding(max_sent_len, size_layer) x = tf.reduce_sum(x * pos, 2) return x def quest_mem(x, vocab_size, max_quest_len, is_training): x = embed_seq(x, vocab_size) x = tf.layers.dropout(x, dropout_rate, training=is_training) pos = position_encoding(max_quest_len, size_layer) return (x * pos) class QA: def __init__(self, vocab_size): self.questions = tf.placeholder(tf.int32,[None,None]) self.inputs = tf.placeholder(tf.int32,[None,None,None]) self.questions_len = tf.placeholder(tf.int32,[None]) self.inputs_len = tf.placeholder(tf.int32,[None]) self.answers_len = tf.placeholder(tf.int32,[None]) self.answers = tf.placeholder(tf.int32,[None,None]) self.training = tf.placeholder(tf.bool) max_sent_len = train_data.params['max_sent_len'] max_quest_len = train_data.params['max_quest_len'] max_answer_len = train_data.params['max_answer_len'] lookup_table = 
tf.get_variable('lookup_table', [vocab_size, size_layer], tf.float32) lookup_table = tf.concat((tf.zeros([1, size_layer]), lookup_table[1:, :]), axis=0) with tf.variable_scope('questions'): question = quest_mem(self.questions, vocab_size, max_quest_len, self.training) with tf.variable_scope('memory_o'): memory_o = input_mem(self.inputs, vocab_size, max_sent_len, self.training) with tf.variable_scope('memory_i'): memory_i = input_mem(self.inputs, vocab_size, max_sent_len, self.training) with tf.variable_scope('interaction'): response_proj = tf.layers.Dense(size_layer) for _ in range(n_hops): answer = hop_forward(question, memory_o, memory_i, response_proj, self.inputs_len, self.questions_len, self.training) question = answer with tf.variable_scope('memory_o', reuse=True): embedding = tf.get_variable('lookup_table') cell = tf.nn.rnn_cell.LSTMCell(size_layer) vocab_proj = tf.layers.Dense(vocab_size) state_proj = tf.layers.Dense(size_layer) init_state = state_proj(tf.layers.flatten(answer)) init_state = tf.layers.dropout(init_state, dropout_rate, training=self.training) helper = tf.contrib.seq2seq.TrainingHelper( inputs = tf.nn.embedding_lookup(embedding, shift_right(self.answers)), sequence_length = tf.to_int32(self.answers_len)) encoder_state = tf.nn.rnn_cell.LSTMStateTuple(c=init_state, h=init_state) decoder = tf.contrib.seq2seq.BasicDecoder(cell = cell, helper = helper, initial_state = encoder_state, output_layer = vocab_proj) decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(decoder = decoder, maximum_iterations = tf.shape(self.inputs)[1]) self.outputs = decoder_output.rnn_output helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(embedding = embedding, start_tokens = tf.tile( tf.constant([START], dtype=tf.int32), [tf.shape(self.inputs)[0]]), end_token = END) decoder = tf.contrib.seq2seq.BasicDecoder( cell = cell, helper = helper, initial_state = encoder_state, output_layer = vocab_proj) decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode( decoder = decoder, maximum_iterations = max_answer_len) self.logits = decoder_output.sample_id correct_pred = tf.equal(self.logits[:,0], self.answers[:,0]) self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) self.cost = tf.reduce_mean(tf.contrib.seq2seq.sequence_loss(logits = self.outputs, targets = self.answers, weights = tf.ones_like(self.answers, tf.float32))) self.optimizer = tf.train.AdamOptimizer().minimize(self.cost) tf.reset_default_graph() sess = tf.InteractiveSession() model = QA(train_data.params['vocab_size']) sess.run(tf.global_variables_initializer()) batching = (train_data.data['val']['inputs'].shape[0] // batch_size) * batch_size for i in range(epoch): total_cost, total_acc = 0, 0 for k in range(0, batching, batch_size): batch_questions = train_data.data['val']['questions'][k:k+batch_size] batch_inputs = train_data.data['val']['inputs'][k:k+batch_size] batch_inputs_len = train_data.data['len']['inputs_len'][k:k+batch_size] batch_questions_len = train_data.data['len']['questions_len'][k:k+batch_size] batch_answers_len = train_data.data['len']['answers_len'][k:k+batch_size] batch_answers = train_data.data['val']['answers'][k:k+batch_size] acc, cost, _ = sess.run([model.accuracy,model.cost,model.optimizer], feed_dict={model.questions:batch_questions, model.inputs:batch_inputs, model.inputs_len:batch_inputs_len, model.questions_len:batch_questions_len, model.answers_len:batch_answers_len, model.answers:batch_answers, model.training:True}) total_cost += cost total_acc += acc total_cost /= 
(train_data.data['val']['inputs'].shape[0] // batch_size) total_acc /= (train_data.data['val']['inputs'].shape[0] // batch_size) print('epoch %d, avg cost %f, avg acc %f'%(i+1,total_cost,total_acc)) testing_size = 32 batch_questions = test_data.data['val']['questions'][:testing_size] batch_inputs = test_data.data['val']['inputs'][:testing_size] batch_inputs_len = test_data.data['len']['inputs_len'][:testing_size] batch_questions_len = test_data.data['len']['questions_len'][:testing_size] batch_answers_len = test_data.data['len']['answers_len'][:testing_size] batch_answers = test_data.data['val']['answers'][:testing_size] logits = sess.run(model.logits, feed_dict={model.questions:batch_questions, model.inputs:batch_inputs, model.inputs_len:batch_inputs_len, model.questions_len:batch_questions_len, model.answers_len:batch_answers_len, model.training:False}) for i in range(testing_size): print('QUESTION:',' '.join([train_data.vocab['idx2word'][k] for k in batch_questions[i]])) print('REAL:',train_data.vocab['idx2word'][batch_answers[i,0]]) print('PREDICT:',train_data.vocab['idx2word'][logits[i,0]],'\n') ```
github_jupyter
#### Sentiment classification on the IMDB dataset with an RNN
```
from __future__ import absolute_import,print_function,division,unicode_literals
import tensorflow as tf
import tensorflow.keras as keras
import numpy as np
import os

tf.__version__

tf.random.set_seed(22)
np.random.seed(22)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# Hyperparameters
vocab_size = 10000
max_review_length = 80
embedding_dim = 100
units = 64
num_classes = 2
batch_size = 32
epochs = 10

# Load the dataset
imdb = keras.datasets.imdb
(train_data,train_labels),(test_data,test_labels) = imdb.load_data(num_words = vocab_size)

train_data[0]

len(train_data)

# Build the word index
word_index = imdb.get_word_index()
word_index = {k:(v + 3) for k, v in word_index.items()}
word_index["<PAD>"] = 0
word_index["<START>"] = 1
word_index["<UNK>"] = 2
word_index["<UNUSED>"] = 3

reversed_word_index = dict([(value,key) for (key,value) in word_index.items()])

def decode_review(text):
    return ' '.join([reversed_word_index.get(i,'?') for i in text])

decode_review(train_data[0])

# Split off a validation set before truncating the training set,
# otherwise the validation slice would be empty
val_data = train_data[20000:25000]
val_labels = train_labels[20000:25000]
train_data = train_data[:20000]
train_labels = train_labels[:20000]

# Pad the sequences to a fixed length
train_data = keras.preprocessing.sequence.pad_sequences(train_data,value = word_index["<PAD>"],padding = 'post',maxlen = max_review_length )
test_data = keras.preprocessing.sequence.pad_sequences(test_data,value = word_index["<PAD>"],padding = 'post',maxlen = max_review_length )

train_data[0]

# Build the model
class RNNModel(keras.Model):
    def __init__(self,units,num_classes,num_layers):
        super(RNNModel,self).__init__()
        self.units = units
        self.embedding = keras.layers.Embedding(vocab_size,embedding_dim,input_length = max_review_length)
        """
        self.lstm = keras.layers.LSTM(units,return_sequences = True)
        self.lstm_2 = keras.layers.LSTM(units)
        """
        self.lstm = keras.layers.Bidirectional(keras.layers.LSTM(self.units))
        self.dense = keras.layers.Dense(1)

    def call(self,x,training = None,mask = None):
        x = self.embedding(x)
        x = self.lstm(x)
        x = self.dense(x)
        return x

model = RNNModel(units,num_classes,num_layers=2)
model.compile(optimizer = keras.optimizers.Adam(0.001),
              loss = keras.losses.BinaryCrossentropy(from_logits = True),
              metrics = ['accuracy'])
model.fit(train_data,train_labels,
          epochs = epochs,batch_size = batch_size,
          validation_data = (test_data,test_labels))
model.summary()

result = model.evaluate(test_data,test_labels)
# output:loss: 0.6751 - accuracy: 0.8002

def GRU_Model():
    model = keras.Sequential([
        keras.layers.Embedding(input_dim = vocab_size,output_dim = 32,input_length = max_review_length),
        keras.layers.GRU(32,return_sequences = True),
        keras.layers.GRU(1,activation = 'sigmoid',return_sequences = False)
    ])
    # the last GRU already applies a sigmoid, so the loss expects probabilities, not logits
    model.compile(optimizer = keras.optimizers.Adam(0.001),
                  loss = keras.losses.BinaryCrossentropy(from_logits = False),
                  metrics = ['accuracy'])
    return model

model = GRU_Model()
model.summary()

%%time
history = model.fit(train_data,train_labels,batch_size = batch_size,epochs = epochs,validation_split = 0.1)

import matplotlib.pyplot as plt

plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.legend(['training','validation'], loc = 'upper left')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.show()
```
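As a final usage example, here is a minimal sketch of single-review inference that reuses the `word_index`, padding settings and the most recently trained model above (the GRU model with a sigmoid output); the review sentence itself is just a made-up illustration, and the tokenisation is deliberately naive.
```
# Minimal inference sketch: encode an arbitrary review the same way the IMDB data is encoded
# (index offset of +3, <UNK> for unknown or out-of-vocabulary words), then predict with the
# most recently trained model. The example sentence is illustrative only.
def encode_review(text):
    tokens = [word_index.get(w, word_index["<UNK>"]) for w in text.lower().split()]
    # indices beyond the embedding vocabulary fall back to <UNK>
    tokens = [i if i < vocab_size else word_index["<UNK>"] for i in tokens]
    return keras.preprocessing.sequence.pad_sequences(
        [[word_index["<START>"]] + tokens],
        value = word_index["<PAD>"], padding = 'post', maxlen = max_review_length)

sample = encode_review("this movie was surprisingly good and the acting was great")
print(model.predict(sample))  # sigmoid output close to 1 suggests a positive review
```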
github_jupyter
``` from plot_helpers import * from source_files_extended import load_sfm_depth, load_aso_depth, load_classifier_data figure_style= dict(figsize=(8, 6)) aso_snow_depth_values = load_aso_depth() sfm_snow_depth_values = load_sfm_depth(aso_snow_depth_values.mask) ``` ## SfM snow depth distribution ``` data = [ { 'data': sfm_snow_depth_values, 'label': 'SfM', 'color': 'brown', } ] with Histogram.plot(data, (-5, 5), **figure_style) as ax: ax ``` ## Positive snow depth comparison ``` data = [ { 'data': aso_snow_depth_values, 'label': 'ASO', 'color': 'dodgerblue', }, { 'data': np.ma.masked_where(sfm_snow_depth_values <= 0.0, sfm_snow_depth_values, copy=True), 'label': 'SfM', 'color': 'brown', } ] with Histogram.plot(data, (0, 5), **figure_style) as ax: ax ``` ## Pixel Classification ``` casi_classification = load_classifier_data(aso_snow_depth_values.mask) casi_classes, classes_count = np.unique(casi_classification, return_counts=True) non_snow_casi = np.ma.masked_where(casi_classification == 1, casi_classification, copy=True) assert classes_count[1:4].sum() == np.count_nonzero(~non_snow_casi.mask) ``` ## ASO non-snow pixels depth values ``` data = [ { 'data': np.ma.masked_where(non_snow_casi.mask, aso_snow_depth_values, copy=True), 'label': 'ASO', 'color': 'dodgerblue', } ] with Histogram.plot(data, (0, 5), **figure_style) as ax: ax ``` ## CASI snow pixels snow depth values ``` data = [ { 'data': np.ma.masked_where(~non_snow_casi.mask, aso_snow_depth_values, copy=True), 'label': 'ASO', 'color': 'steelblue', }, { 'data': np.ma.masked_where(~non_snow_casi.mask, sfm_snow_depth_values, copy=True), 'label': 'SfM', 'color': 'beige', 'alpha': 0.7, } ] with Histogram.plot(data, (0, 5), **figure_style) as ax: ax.axvline(x=0.08, linestyle='dotted', color='dimgrey', label='ASO Precision') ``` ## SfM positive values ``` data = [ { 'data': np.ma.masked_where(sfm_snow_depth_values < 0, aso_snow_depth_values, copy=True), 'label': 'ASO', 'color': 'steelblue', }, { 'data': np.ma.masked_where(sfm_snow_depth_values < 0, sfm_snow_depth_values, copy=True), 'label': 'SfM', 'color': 'beige', 'alpha': 0.7, } ] with Histogram.plot(data, (0, 5), **figure_style) as ax: ax.axvline(x=0.08, linestyle='dotted', color='dimgrey', label='ASO Precision') ax.set_title('SfM positive area snow depth values'); ```
github_jupyter
# **Numba** ### Numba is a JIT Compiler and uses LLVM internally - No compilation required ! ![](./img/numba_flowchart.png) ``` import time def get_time_taken(func, *args): res = func(*args) start = time.time() func(*args) end = time.time() time_taken = end - start print(f"Total time - {time_taken:.5f} seconds") print(res) from numba import jit from math import tan, atan @jit def slow_function(n): result = 0 for x in range(n ** 7): result += tan(x) * atan(x) return result get_time_taken(slow_function, 10) ``` ### The speed up is obvious but there are a lot of caveats ### For example, any function used must also be "decorated" ``` from numba import jit, int32 @jit(int32(int32), nopython=True) def func(x): return tan(x) * atan(x) @jit(int32(int32), nopython=True) def slow_function(n): result = 0 for x in range(n ** 7): result += func(x) return result get_time_taken(slow_function, 10) ``` ### Notice the slight overhead ``` from numba import prange,jit, int32 @jit(int32(int32), nopython=True, parallel=True) def slow_function(n): result = 0 for x in prange(n ** 7): result += tan(x) * atan(x) return result get_time_taken(slow_function, 10) ``` ### prange is the parallel version of the range function in python and parallel=True option optimizes the code to use all the cores ### Lets see how it works with Numpy ``` from numba import jit, int32 import numpy as np @jit(int32(int32), nopython=True) def slow_func_in_numpy(n): result = 0 for x in np.arange(n ** 7): result += np.tan(x) * np.arctan(x) return result get_time_taken(slow_func_in_numpy, 10) ``` ### Do I have to write functions for every type? ``` from numba import jit, int32, int64, float32, float64 from math import tan, atan @jit([int32(int32), int64(int64), float32(float32), float64(float64)]) def slow_function(n): result = 0 for x in range(n ** 7): result += tan(x) * atan(x) return result get_time_taken(slow_function, 10) get_time_taken(slow_function, 10.2) ``` ### Let's see how we can create numpy ufuncs using numba ``` from numba import vectorize, int32, int64, float32, float64 import numpy as np @vectorize([int32(int32, int32), int64(int64, int64), float32(float32, float32), float64(float64, float64)]) def addfunc(x, y): return x + y @vectorize def simpler_addfunc(x, y): return x + y addfunc(2, 3) addfunc(6.42, 9.8) simpler_addfunc(2, 3.4) simpler_addfunc(np.array([1,2,3]), np.array([4,5,6])) ``` ### Limited support for classes ``` from numba import jitclass spec = [ ('x', int32), ('y', int32) ] @jitclass(spec) class Node(object): def __init__(self, x, y): self.x = x self.y = y def distance(self, n): return (self.x - n.x) ** 2 + (self.y - n.y) ** 2 def distance_from_point(self, x, y): return (self.x - x) ** 2 + (self.y - y) ** 2 n1 = Node(3,2) n2 = Node(9,6) %time n1.distance(n2) %time n1.distance_from_point(4,5) ``` ### This is just a glance into what numba can do, but remember, it does come with its own limitations Numba Limitations ================= 1. No Strings Support 2. No support for exception handling (try .. except, try .. finally) 3. No support for context management (the with statement) 4. list comprehension is supported, but not dict, set or generator comprehensions 5. No support for generator delegation (yield from) raise and assert are supported # **Exercise** Try using numba's @jit decorator with the function you wrote earlier and check with %time if there is any improvement in the performance **If you find any improvement, feel free to tweet about your experience with the handle @pyconfhyd**
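If you want a skeleton to start from, here is a minimal sketch of the pattern, reusing the `get_time_taken` helper defined at the top of this notebook; `my_function` below is only a placeholder for whatever function you wrote earlier, not part of the workshop material.
```
from numba import jit

# Placeholder for your own function from the earlier exercise
def my_function(n):
    total = 0.0
    for i in range(n):
        total += i ** 0.5
    return total

# Same code, compiled by Numba in nopython mode
fast_function = jit(nopython=True)(my_function)

# get_time_taken calls the function once before timing, so the jit compilation
# cost is excluded from the measured time
get_time_taken(my_function, 10000000)
get_time_taken(fast_function, 10000000)
```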
github_jupyter
# Recommender Systems ### Reverse-engeneering users needs/desires Recommender systems have been in the heart of ML. Mostly that in order to get insigths on large populations it was necessary to understand how users behave, but this can only be done from the historical behaviour. Let's fix some setting that we use for the workshop. We have three main components: the business, the users, and the products. Most of the time a business would like to recommend products to its users. The business knows that the better it understands the user, the better the recommendations, and thus the user will be more likely to consume its products. Simple right? Well, not as much, the following things need to be considered: - What does it mean to know a user? How can we encode this? - If we have the purchase history of the user, do we want to recommend new items or old items? Why? - Business rules exists, like inventory, push products, maximize revenue, lower churn, etc. - What policies should be put in place? GDPR? - How to reduce bias. - Computational resources, speed. - Cold start for products and users. - Legacy systems. - UX integration. - etc Historically, two main approaches exist: collaborative filtering and content-based recommendations. These are often used together. # Collaborative Filtering ## Memory based - Easy to explain - Hard to scale - Not good for sparse data Usually based on similarity. ## Model based - Good for sparse - Difficult to explain - Hard to do inference Let's start with the most basic approach using a popular (light-weight) dataset [MovieLens](http://files.grouplens.org/datasets/movielens/ml-20m.zip) ``` import os import numpy as np import pandas as pd import matplotlib.pyplot as plt ``` ## Load MovieLens Data ``` data_dir = "../data/ml-20m" os.listdir(data_dir) movies = pd.read_csv(f"{data_dir}/movies.csv") ratings = pd.read_csv(f"{data_dir}/ratings.csv") ``` ## Exploring the data Use pandas to understand the distribution of your data, this helps understand what kind of goals are possible. It is always a good idea to extensively explore and understand the data. ### Exercise: 1. Choose a couple from the following list and use pandas to find out the answer: - What columns exist in the data? - What are the possible rankings? - How are the rankings distributed? - What is the average ranking? - What is the distribution of the average ranking among users? - How many genres are there? - What is the genre distribution for the movies? - What can you say about the timestamp? - Do all movies have a year? What is the distribution of Mystery movies during the years? 2. Come up with at least two more statistics that aren't from the above list. *Use the following couple of cells to answer your questions. Make sure to work on this before moving ahead.* ``` # What columns exist in the data? print(f"The movies dataset has columns: {movies.columns.values}") print(f"The ratings dataset has columns: {ratings.columns.values}") # What are the possible rankings? sorted(ratings.rating.unique()) # How are the rankings distributed? ratings[['userId', 'rating']].groupby('userId').mean().hist() plt.show() # What is the average ranking? print(f"The average rating is {round(ratings.rating.mean(), 2)}") ``` Note that the data is quite simple, we only have some info about the movies, which takes the form ``` movies.sample(1) ratings.sample(1) ``` Even though the information about the movies could help us create better recommenders, we won't be using it. Instead we only focus on the ratings dataframe. 
We can count the relevant users and movies from this:
```
ratings.nunique()
```
~ 139K users and ~27K movies, rated on a 10-point scale.

We can also plot two important pieces of information:
- The histogram of how many ratings each movie has.
- The histogram of how many ratings each user gives.

```
ratings.groupby("userId").agg({"movieId":len}).hist(bins=30)
ratings.groupby("movieId").agg({"userId":len}).hist(bins=30)
plt.show()

np.log10(ratings.groupby("userId").agg({"movieId":len})).hist(bins=30)
np.log10(ratings.groupby("movieId").agg({"userId":len})).hist(bins=30)
plt.show()
```
The distributions (note the log scale) show that most movies are rated by only a handful of users, and that most users don't rate many movies.

Furthermore, note that a full user × movie matrix would contain roughly 3.7 billion entries, while there are only 20 million ratings, i.e. only about 0.5 % of the entries are non-zero. We are in a sparse situation (which is not as bad in this case as it is in some other settings).

### Exercise:
1. According to the info above, for which movies/users is it easier to make recommendations? Find at least one user or movie that you suspect is troublesome.
2. The dataframe encodes part of the user-item rating matrix. Suppose that you want to write out this full matrix: what is its size in GB?
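Before moving on, here is a minimal sketch (not part of the exercises) of how the memory-based approach described at the top of the notebook can be started from this sparse data: build a sparse user-item matrix and compute user-user cosine similarities. It assumes `scipy` and `scikit-learn` are installed, which are not imported above.
```
from scipy.sparse import csr_matrix
from sklearn.metrics.pairwise import cosine_similarity

# Map the raw ids to contiguous row/column indices
user_codes = ratings["userId"].astype("category").cat.codes
movie_codes = ratings["movieId"].astype("category").cat.codes

# Sparse user x movie matrix: ~20M stored values instead of billions of cells
user_item = csr_matrix((ratings["rating"], (user_codes, movie_codes)))

# Cosine similarity between the first user and all others
# (the basic building block of memory-based collaborative filtering)
sims = cosine_similarity(user_item[0], user_item).ravel()
print(sims.argsort()[::-1][1:6])  # indices of the 5 most similar users
```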
github_jupyter
# 1. Import libraries ``` #----------------------------Reproducible---------------------------------------------------------------------------------------- import numpy as np import random as rn import os seed=0 os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) rn.seed(seed) #----------------------------Reproducible---------------------------------------------------------------------------------------- os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' #-------------------------------------------------------------------------------------------------------------------------------- import matplotlib import matplotlib.pyplot as plt import matplotlib.cm as cm %matplotlib inline matplotlib.style.use('ggplot') import random import scipy.sparse as sparse import scipy.io from keras.utils import to_categorical from sklearn.ensemble import ExtraTreesClassifier from sklearn.model_selection import cross_val_score from sklearn.preprocessing import MinMaxScaler from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score import scipy.io from skfeature.function.similarity_based import SPEC import time import pandas as pd #-------------------------------------------------------------------------------------------------------------------------------- def ETree(p_train_feature,p_train_label,p_test_feature,p_test_label,p_seed): clf = ExtraTreesClassifier(n_estimators=50, random_state=p_seed) # Training clf.fit(p_train_feature, p_train_label) # Training accuracy print('Training accuracy:',clf.score(p_train_feature, np.array(p_train_label))) print('Training accuracy:',accuracy_score(np.array(p_train_label),clf.predict(p_train_feature))) #print('Training accuracy:',np.sum(clf.predict(p_train_feature)==np.array(p_train_label))/p_train_label.shape[0]) # Testing accuracy print('Testing accuracy:',clf.score(p_test_feature, np.array(p_test_label))) print('Testing accuracy:',accuracy_score(np.array(p_test_label),clf.predict(p_test_feature))) #print('Testing accuracy:',np.sum(clf.predict(p_test_feature)==np.array(p_test_label))/p_test_label.shape[0]) #-------------------------------------------------------------------------------------------------------------------------------- def write_to_csv(p_data,p_path): dataframe = pd.DataFrame(p_data) dataframe.to_csv(p_path, mode='a',header=False,index=False,sep=',') ``` # 2. 
Loading data ``` train_data_arr=np.array(pd.read_csv('./Dataset/Activity/final_X_train.txt',header=None)) test_data_arr=np.array(pd.read_csv('./Dataset/Activity/final_X_test.txt',header=None)) train_label_arr=(np.array(pd.read_csv('./Dataset/Activity/final_y_train.txt',header=None))-1) test_label_arr=(np.array(pd.read_csv('./Dataset/Activity/final_y_test.txt',header=None))-1) data_arr=np.r_[train_data_arr,test_data_arr] label_arr=np.r_[train_label_arr,test_label_arr] label_arr_onehot=label_arr print(data_arr.shape) print(label_arr_onehot.shape) data_arr=MinMaxScaler(feature_range=(0,1)).fit_transform(data_arr) C_train_x,C_test_x,C_train_y,C_test_y= train_test_split(data_arr,label_arr_onehot,test_size=0.2,random_state=seed) x_train,x_validate,y_train_onehot,y_validate_onehot= train_test_split(C_train_x,C_train_y,test_size=0.1,random_state=seed) x_test=C_test_x y_test_onehot=C_test_y print('Shape of x_train: ' + str(x_train.shape)) print('Shape of x_validate: ' + str(x_validate.shape)) print('Shape of x_test: ' + str(x_test.shape)) print('Shape of y_train: ' + str(y_train_onehot.shape)) print('Shape of y_validate: ' + str(y_validate_onehot.shape)) print('Shape of y_test: ' + str(y_test_onehot.shape)) print('Shape of C_train_x: ' + str(C_train_x.shape)) print('Shape of C_train_y: ' + str(C_train_y.shape)) print('Shape of C_test_x: ' + str(C_test_x.shape)) print('Shape of C_test_y: ' + str(C_test_y.shape)) key_feture_number=50 ``` # 3. Classifying 1 ### Extra Trees ``` train_feature=C_train_x train_label=C_train_y test_feature=C_test_x test_label=C_test_y print('Shape of train_feature: ' + str(train_feature.shape)) print('Shape of train_label: ' + str(train_label.shape)) print('Shape of test_feature: ' + str(test_feature.shape)) print('Shape of test_label: ' + str(test_label.shape)) p_seed=seed ETree(train_feature,train_label,test_feature,test_label,p_seed) ``` # 4. Model ``` start = time.clock() # construct affinity matrix kwargs = {'style': 0} # obtain the scores of features, and sort the feature scores in an ascending order according to the feature scores train_score = SPEC.spec(train_feature, **kwargs) train_idx = SPEC.feature_ranking(train_score, **kwargs) # obtain the dataset on the selected features train_selected_x = train_feature[:, train_idx[0:key_feture_number]] print("train_selected_x",train_selected_x.shape) # obtain the scores of features, and sort the feature scores in an ascending order according to the feature scores test_score = SPEC.spec(test_feature, **kwargs) test_idx = SPEC.feature_ranking(test_score, **kwargs) # obtain the dataset on the selected features test_selected_x = test_feature[:, test_idx[0:key_feture_number]] print("test_selected_x",test_selected_x.shape) time_cost=time.clock() - start write_to_csv(np.array([time_cost]),"./log/SPEC_time"+str(key_feture_number)+".csv") C_train_selected_x=train_selected_x C_test_selected_x=test_selected_x C_train_selected_y=C_train_y C_test_selected_y=C_test_y print('Shape of C_train_selected_x: ' + str(C_train_selected_x.shape)) print('Shape of C_test_selected_x: ' + str(C_test_selected_x.shape)) print('Shape of C_train_selected_y: ' + str(C_train_selected_y.shape)) print('Shape of C_test_selected_y: ' + str(C_test_selected_y.shape)) ``` # 5. 
Classifying 2 ### Extra Trees ``` train_feature=C_train_selected_x train_label=C_train_y test_feature=C_test_selected_x test_label=C_test_y print('Shape of train_feature: ' + str(train_feature.shape)) print('Shape of train_label: ' + str(train_label.shape)) print('Shape of test_feature: ' + str(test_feature.shape)) print('Shape of test_label: ' + str(test_label.shape)) p_seed=seed ETree(train_feature,train_label,test_feature,test_label,p_seed) ``` # 6. Reconstruction loss ``` from sklearn.linear_model import LinearRegression def mse_check(train, test): LR = LinearRegression(n_jobs = -1) LR.fit(train[0], train[1]) MSELR = ((LR.predict(test[0]) - test[1]) ** 2).mean() return MSELR train_feature_tuple=(C_train_selected_x,C_train_x) test_feature_tuple=(C_test_selected_x,C_test_x) reconstruction_loss=mse_check(train_feature_tuple, test_feature_tuple) print(reconstruction_loss) ```
github_jupyter
<img src="./pictures/DroneApp_logo.png" style="float:right; max-width: 180px; display: inline" alt="INSA" /></a> <img src="./pictures/logo_sizinglab.png" style="float:right; max-width: 100px; display: inline" alt="INSA" /></a> # Frame design The objective of this study, is to optimize the overall design in terms of mass. For this target, the frame will be sized to withstand the resulting loads of two sizing scenarios: the **maximum take-off thrust (arms)** and a **landing with an impact speed of 1m/s (body,arms, landing gears)**. Due to the great diversity of existing models of drones in the market, a simple design of quad-copter was considered for further calculations and steps **Scipy** and **math** packages will be used for this notebook in order to illustrate the optimization algorithms of python. ``` import scipy import scipy.optimize from math import pi from math import sqrt from math import sin,cos,tan import math import numpy as np import timeit import pandas as pd import ipywidgets as widgets from ipywidgets import interactive from IPython.display import display, HTML pd.options.display.float_format = '{:,.2f}'.format ``` #### Frame drawing *Simplified design of the drone frame and nomenclature of geometrical parameters used.* <img src="./img/FrameDesign.jpg" alt="4-arms drone structure" width="800"/> ## Sizing scenarios ### Take-Off scenario A maximum force produced at the take-off $F_{TO}$ generates a bending moment $M_{TO}$ equivalent to: $M_{TO}=\frac{F_{TO}\cdot L_{arm}}{N_{arms}}$ The maximum stress $\sigma_{max}$ for a beam of rectangular cross-section is estimated with safety coefficient $k_s$ as: $\displaystyle\sigma_{max}=\frac{H_{arm}}{2} \frac{12 \cdot Thrust \cdot l_{arm}}{H_{arm}^4-(H_{arm}-2e)^4} \leq \frac{\sigma_{alloy}}{k_s}$ which can be written with dimensionless arm aspect ratio $\pi_{arm}=\frac{e}{H_{arm}}$: $\displaystyle H_{arm}\geq \left ( \frac{6 \cdot Thrust \cdot l_{arm} \cdot k_s}{\sigma_{alloy}(1-(1-2 \cdot \pi_{arm})^4)} \right )^{\frac{1}{3}}$ ### Crash sizing scenario The crash sizing scenario considers a maximum speed $V_{impact}$ of the drone when hitting the ground. At such speed the structure should resist (i.e. the maximum stress should not be exceeded) and for higher speeds, the landing gears are the parts that break as structural fuses. To calculate the equivalent maximum load resisted by the landing gears, the energy conservation law applies the kinetic energy stored in drone mass to potential energy in structural parts transitory deformation: \begin{equation} \begin{gathered} \frac{1}{2}k_{eq} \cdot \delta x^2= \frac{1}{2} M_{tot} \cdot V_{impact}^2 \\ \Rightarrow F_{max} =\frac{1}{4}( k_{eq} \cdot \delta x + M_{total} \cdot g)=\frac{1}{4}(V_{impact} \cdot \sqrt{k_{eq}M_{total}} + M_{total} \cdot g) \end{gathered} \end{equation} To calculate the maximum stress induced by the maximum load $F_{max}$ applied to one landing gear, the equivalent stiffness $k_{eq}$ should be determined. For this purpose, the problem is broken down into simpler structural parts and the equivalent stiffness $k_{eq}$ is expressed considering the effect of each stiffness on the whole part. 
\begin{equation} k_{eq} = 4 \cdot \frac{\overset{\sim}{k_1} \cdot \overset{\sim}{k_2}}{\overset{\sim}{k_1}+\overset{\sim}{k_2}} \end{equation} *Equivalent stiffness problem decomposition.* <img src="./img/crash.jpg" alt="Equivalent stiffness problem" width="800"/> ## Sizing Code The set of equations of a sizing code can generate typical issues such : - Underconstrained set of equations: the lacking equations can come from additional scenarios, estimation models or additional sizing variable. - overconstrained equations often due to the selection of a component on multiple critera: the adding of over-sizing coefficients and constraints in the optimization problem can generally fix this issue - algebraic loops often due to selection criteria requiring informations generally available after the selection **Underconstraint singularities** Example: two variables in one equation: - Equation: cross section side of a beam resisting a normal stress: $\displaystyle H=\sqrt[3]{\frac{6*M_{to}}{\sigma_{bc}*(1-(1-2*T)^4)}}$ - Variables: thickness ($T$), cross section side ($H$) - Geometrical restriction:$\displaystyle T<H$ - Strategy: $\displaystyle T=k_{TH}*H$ where 0<$k_{TH}$<1 The equation is thus transformed into an inequality and through a large number of iterations the value of both variables can be estimated. $\displaystyle H>\sqrt[3]{\frac{6*M_{to}}{\sigma_{bc}*(1-(1-2*k_{TH})^4)}}$ **Algebraic loop** : beta and Hlg to fulfill objective and contraints. The final optimization problem depends thus of these parameters: - $k_{TH}$: aspect ratio : ratio thickness (T) / side of the beam (H) < 1. Underconstraint - $k_{BH}$ aspect ratio : ratio body height (Hbody)/ height beam (H) > 1. Underconstraint - $ \theta$ landing gear angle (0 is vertical beam) 0<Teta<90. Algebraic Loop - $k_{TT}$ ratio landing gear thickness ( body side dimensions). Underconstraint - $k_{L}$ aspect ratio: Length body(Lbody)/length arm (Larm). Underconstraint - $Hlg$: Height of landing gear (space for battery or sensors). Algebraic Loop The sizing code is defined here in a function which can give: - an evaluation of the objective: here the frame mass - an evaluation of the constraints: here the normal stress at the landing gear and body core, battery dimensions. **Restrictions applied**: 1. **Strength of Materials (two constraints):** the stress resisted by the components(arm, body, landing gear), $\sigma_j$ must be lower than the maximum material stress. 2. **Geometry (one constraint)**: Volume of the body must be larger than the battery one's. 3. **Geometry (one constraint)**: The landing gear must be higher than the deformation caused during the impact and a possible camera or body hanging on the drone. 
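As a quick sanity check of the take-off equation above, the minimum arm side length can be computed directly with the specification values declared in the next section (a take-off thrust of 32 N, four arms of 0.35 m, an 80 MPa alloy and a safety factor of 4) and an assumed aspect ratio $\pi_{arm}=0.1$. This standalone sketch is only illustrative, since the optimizer below treats these quantities as design variables.
```
# Standalone sanity check of the take-off sizing equation, using the specification
# values of this notebook and an assumed aspect ratio pi_arm = 0.1
F_to = 32        # [N] total take-off thrust
Larm = 0.35      # [m] arm length
Narm = 4         # [-] number of arms
Sigma_bc = 80e6  # [Pa] elastic strength of the alloy
k_sec = 4        # [-] safety coefficient
pi_arm = 0.1     # [-] assumed thickness/side aspect ratio

M_to = F_to / Narm * Larm * k_sec                                    # [N.m] bending moment per arm
H_min = (6 * M_to / (Sigma_bc * (1 - (1 - 2*pi_arm)**4)))**(1/3)     # [m] minimum beam side length
print("Minimum arm side length: %.1f mm" % (H_min * 1e3))
```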
## Parameters definition ### General specifications ``` # Input Geometrical dimensions Larm=0.35 # [m] one arm length Narm=4 # [-] arms number VolBat=0.132*0.043*0.027 #[m^3] Volume Battery (https://www.miniplanes.fr/eflite-accu-lipo-4s-148v-3300mah-50c-prise-ec3) # Specifications for take off F_to=32 # [N] global drone force for the take off M_total=2 # [kg] total drone mass # Specifications for landing impact v_impact=1 # [m/s] impact speed #Payload specifications H_camera=0.057#[m] height camera ``` ### Material assumptions ``` # Material properties # for beeam and core Ey_bc=70.3e9 # [Pa] Young modulus Rho_bc=2700 # [kg/m^3] Volumic mass Sigma_bc=80e6 # [Pa] Elastic strength # for landing gear Ey_lg=2e9 # [Pa] Young modulus Rho_lg=1070 # [kg/m^3] Volumic mass Sigma_lg=39e6 # [Pa] Elastic strength ``` ### Design assumptions (constant) ``` k_sec=4 # [-] security coefficient ``` ### Design variable (to optimize) ``` k_TH=0.1 # [-] aspect ratio : ratio thickness (T) / side of the beam (H) < 1 k_BH=2 # [-] aspect ratio : ratio body height (Hbody)/ height beam (H) > 1 Teta=20/90*pi/2 # [rad] landing gear angle (0 is vertical beam) 0<Teta<90 k_TT=1 # [-] aspect ratio : ratio landing gear thickness (Tlg)/ thickness beam (T). > 1 k_L=0.5 # [-] aspect ratio: Length body(Lbody)/length arm (Larm)<1 Hlg=.1 # [m] Height of landing gear (space for battery or sensors) #Vector of parameters parameters= scipy.array((k_TH,k_BH,Teta,k_TT,k_L,Hlg)) # Optimization bounds # k_TH, k_BH, Theta, k_TT, k_L, H_LG bounds = [(0.15,0.4), (1,4), (30/90*pi/2,pi/2), (1,100), (0,1), (0.01,1.165)] ``` <a id='#section5'></a> ``` def SizingCode(param,arg): #Design Variables k_TH=param[0] k_BH=param[1] Teta=param[2] k_TT=param[3] k_L=param[4] Hlg=param[5] #### Beam Sizing - Take Off M_to=F_to/Narm*Larm*k_sec # [N.m] Moment applied in the drone center # H=(M_to/Sigma_bc/(1-(1-2*k_TH)**4))**(1/3) # [m] Side length of the beam H=(6*M_to/Sigma_bc/(1-(1-2*k_TH)**4))**(1/3) # [m] Side length of the beam T=k_TH*H # [m] Thickness of the side beam #### Body and Landing gear sizing - Landing impact # Body stiffness calculation Hbody=k_BH*H # [m] height of the body Ibody=1/12*((H+2*T)*Hbody**3-H*(Hbody-2*T)**3) # [m^4] Section inertia of the body Lbody=k_L*Larm #[m] length of the body K1=3*Ey_bc*Ibody/(Lbody)**3 # [N/m] equivalent stiffness of the body # Landing gear stiffness calculation Llg=Hlg/cos(Teta) # [m] Landing gear length Tlg=k_TT*T # [m] landing gear thickness Ilg=1/12*(Tlg**4) # [m^4] Section inertia of the landing gear rectangular section K2=3*Ey_lg*Ilg/Llg**3/sin(Teta) # [N/m] equivalent stiffness of the landing gear # Global stiffness Kg=K1*K2/(K1+K2)*Narm # [N/m] global stiffness of all the arms # Impact force Fimpact= (v_impact*(Kg*M_total)**(1/2)+M_total*9.81)*k_sec # [N] Total impact force, we assume all the landing gear impact together # Stress calculation in the landing gear M_LG=Fimpact/Narm*Hlg*tan(Teta) # [N.m] Moment applied in the landing gear Sigma_lg_impact=M_LG*(Tlg/2)/Ilg # [Pa] Max stress in the landing gear # Stress calculation in the body M_Body=(Fimpact/Narm*Lbody+M_LG) # [N.m] Moment applied in the body Sigma_body_impact=M_Body*(Hbody/2)/Ibody # [Pa] Max stress in the landing gear # Mass calculation Mbeams=Narm*Larm*(H**2-(H-2*T)**2)*Rho_bc #[kg] Total beams' mass MLG=Narm*Llg*Tlg**2*Rho_lg #[kg] Total landing gears' mass Mbody=Narm*(Lbody)*(Hbody*(H+2*T)-(Hbody-2*T)*H)*Rho_bc #[kg] Total body's mass Mframe=Mbeams+MLG+Mbody #[kg] total frame mass Vbody=(2*Lbody)**2*Hbody #[m^3] volume body to integer 
battery # Contraintes : stress constraints = [(Sigma_bc-Sigma_body_impact)/Sigma_body_impact,(Sigma_lg-Sigma_lg_impact)/Sigma_lg_impact,(Vbody-VolBat)/VolBat,(Hlg-Fimpact/(Narm*Kg)-H_camera)/(Hlg)] # Objectif : masse totale if arg=='Obj': return Mframe elif arg == 'ObjP': P = 0. # Penalisation nulle for C in constraints: if (C < 0.): P = P-1e9*C return Mframe + P #mass optimizatin elif arg=='Prt': col_names_opt = ['Type', 'Name', 'Min', 'Value', 'Max', 'Unit', 'Comment'] df_opt = pd.DataFrame() df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'k_TH', 'Min': bounds[0][0], 'Value': k_TH, 'Max': bounds[0][1], 'Unit': '[-]', 'Comment': 'Aspect ratio for the beam\'s thickness (T/H), '}])[col_names_opt] df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'k_BH', 'Min': bounds[1][0], 'Value': k_BH, 'Max': bounds[1][1], 'Unit': '[-]', 'Comment': 'Aspect ratio for the body\'s height (Hbody/H)'}])[col_names_opt] df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'Theta', 'Min': bounds[2][0], 'Value': Teta/pi*180, 'Max': bounds[2][1], 'Unit': '[-]', 'Comment': 'Angle of the landing gear w.r.t. the beam'}])[col_names_opt] df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'k_TT', 'Min': bounds[3][0], 'Value': k_TT, 'Max': bounds[3][1], 'Unit': '[-]', 'Comment': 'Aspect ratio for the Landing gear\'s thickness (Tlg/T)'}])[col_names_opt] df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'k_L', 'Min': bounds[4][0], 'Value': k_L, 'Max': bounds[4][1], 'Unit': '[-]', 'Comment': 'Aspect ratio: Length body(Lbody)/length arm (Larm) k_L'}])[col_names_opt] df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'Hlg', 'Min': bounds[5][0], 'Value': Hlg, 'Max': bounds[5][1], 'Unit': '[-]', 'Comment': 'Landing gear height'}])[col_names_opt] df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'Mbeams', 'Min': 0, 'Value': Mbeams, 'Max': '-', 'Unit': '[kg]', 'Comment': 'Total beams mass'}])[col_names_opt] df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'MLG', 'Min': 0, 'Value': MLG, 'Max': '-', 'Unit': '[kg]', 'Comment': 'Total landing gear mass'}])[col_names_opt] df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'Mbody', 'Min': 0, 'Value': Mbody, 'Max': '-', 'Unit': '[kg]', 'Comment': 'Total body mass'}])[col_names_opt] df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'Const 0', 'Min': 0, 'Value': constraints[0], 'Max': '-', 'Unit': '[-]', 'Comment': 'Stress margin at the Body: (Sigma_bc-Sigma_body_impact)/Sigma_body_impact'}])[col_names_opt] df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'Const 1', 'Min': 0, 'Value': constraints[1], 'Max': '-', 'Unit': '[-]', 'Comment': 'Stress margin at the landing gears: (Sigma_lg-Sigma_lg_impact)/Sigma_lg_impact'}])[col_names_opt] df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'Const 2', 'Min': 0, 'Value': constraints[2], 'Max': '-', 'Unit': '[-]', 'Comment': '(Vbody-VolBat)/VolBat'}])[col_names_opt] df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'Const 3', 'Min': 0, 'Value': constraints[3], 'Max': '-', 'Unit': '[-]', 'Comment': '(Hlg-Fimpact/(Narm*Kg)-H_camera)/(Hlg)'}])[col_names_opt] col_names = ['Type', 'Name', 'Value', 'Unit', 'Comment'] df = pd.DataFrame() df = df.append([{'Type': 'Arm', 'Name': 'Larm', 'Value': Larm, 'Unit': '[m]', 'Comment': 'Arm length'}])[col_names] df = df.append([{'Type': 'Arm', 'Name': 'H', 'Value': H, 'Unit': '[m]', 'Comment': 'Height beam'}])[col_names] df = df.append([{'Type': 'Arm', 'Name': 'T', 'Value': T, 'Unit': '[m]', 'Comment': 'Thickness 
arm'}])[col_names] df = df.append([{'Type': 'Body', 'Name': 'Lbody', 'Value': Lbody, 'Unit': '[m]', 'Comment': 'Body length'}])[col_names] df = df.append([{'Type': 'Body', 'Name': 'Hbody', 'Value': Hbody, 'Unit': '[m]', 'Comment': 'Body height'}])[col_names] df = df.append([{'Type': 'Body', 'Name': 'H+2*T', 'Value': H+2*T, 'Unit': '[m]', 'Comment': 'Body width'}])[col_names] df = df.append([{'Type': 'Crash', 'Name': 'v_impact', 'Value': v_impact, 'Unit': '[m/s]', 'Comment': 'Crash speed'}])[col_names] df = df.append([{'Type': 'Crash', 'Name': 'Kg', 'Value': Kg, 'Unit': '[N/m]', 'Comment': 'Global stiffness'}])[col_names] df = df.append([{'Type': 'Crash', 'Name': 'k_sec', 'Value': k_sec, 'Unit': '[-]', 'Comment': 'Safety coef.'}])[col_names] df = df.append([{'Type': 'Crash', 'Name': 'Fimpact', 'Value': Fimpact, 'Unit': '[N]', 'Comment': 'Max crash load'}])[col_names] pd.options.display.float_format = '{:,.3f}'.format def view(x=''): #if x=='All': return display(df) if x=='Optimization' : return display(df_opt) return display(df[df['Type']==x]) items = sorted(df['Type'].unique().tolist())+['Optimization'] w = widgets.Select(options=items) return display(df,df_opt) else: return constraints ``` <a id='#section6'></a> ## Optimization problem We will now use the [optimization algorithms](https://docs.scipy.org/doc/scipy/reference/optimize.html) of the Scipy package to solve and optimize the configuration. We use here the SLSQP algorithm without explicit expression of the gradient (Jacobian). A course on Multidisplinary Gradient optimization algorithms and gradient optimization algorithm is given [here](http://mdolab.engin.umich.edu/sites/default/files/Martins-MDO-course-notes.pdf): > Joaquim R. R. A. Martins (2012). A Short Course on Multidisciplinary Design Optimization. University of Michigan We can print of the characterisitcs of the problem before optimization with the initial vector of optimization variables: ``` # Initial characteristics before optimization print("-----------------------------------------------") print("Initial characteristics before optimization :") SizingCode(parameters,'Prt') print("-----------------------------------------------") # Optimization with SLSQP algorithm contrainte = lambda x: SizingCode(x, 'Const') objectif = lambda x: SizingCode(x, 'Obj') objectifP = lambda x: SizingCode(x, 'ObjP') SLSQP = False # Optimization algorithm choice if SLSQP == True: # SLSQP omptimisation result = scipy.optimize.fmin_slsqp(func=objectif, x0=parameters, bounds=bounds, f_ieqcons=contrainte, iter=1500, acc=1e-12) else: # Differential evolution omptimisation result = scipy.optimize.differential_evolution(func=objectifP, bounds=bounds, tol=1e-12) # Final characteristics after optimization print("-----------------------------------------------") print("Final characteristics after optimization :") if SLSQP == True: SizingCode(result,'Obj') SizingCode(result, 'Prt') else: SizingCode(result.x,'Obj') SizingCode(result.x, 'Prt') print("-----------------------------------------------") ```
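A useful final check is to re-evaluate the constraints at the optimum returned above (they should all be non-negative). The call below assumes the differential evolution branch was used (`SLSQP = False`), so the solution vector is stored in `result.x`.
```
# Verify that the optimum satisfies all constraints (each value should be >= 0)
print(SizingCode(result.x, 'Const'))
```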
github_jupyter
# Customizing visual appearance HoloViews elements like the `Scatter` points illustrated in the [Introduction](1-Introduction.ipynb) contain two types of information: - **Your data**, in as close to its original form as possible, so that it can be analyzed and accessed as you see fit. - **Metadata specifying what your data *is***, which allows HoloViews to construct a visual representation for it. What elements do *not* contain is: - The endless details that one might want to tweak about the visual representation, such as line widths, colors, fonts, and spacing. HoloViews is designed to let you work naturally with the meaningful features of your data, while making it simple to adjust the display details separately using the Options system. Among many other benefits, this separation of *content* from *presentation* simplifies your data analysis workflow, and makes it independent of any particular plotting backend. ## Visualizing neural spike trains To illustrate how the options system works, we will use a dataset containing ["spike"](https://en.wikipedia.org/wiki/Action_potential) (neural firing) events extracted from the recorded electrical activity of a [neuron](https://en.wikipedia.org/wiki/Neuron). We will be visualizing the first trial of this [publicly accessible neural recording](http://www.neuralsignal.org/data/04/nsa2004.4/433l019). First, we import pandas and holoviews and load our data: ``` import pandas as pd import holoviews as hv spike_train = pd.read_csv('../assets/spike_train.csv.gz') spike_train.head(n=3) ``` This dataset contains the spike times (in milliseconds) for each detected spike event in this five-second recording, along with a spiking frequency (in Hertz, averaging over a rolling 200 millisecond window). We will now declare ``Curve`` and ``Spike`` elements using this data and combine them into a ``Layout``: ``` curve = hv.Curve(spike_train, 'milliseconds', 'Hertz', group='Firing Rate') spikes = hv.Spikes(spike_train.sample(300), kdims='milliseconds', vdims=[], group='Spike Train') curve + spikes ``` Notice that the representation for this object is purely textual; so far we have not yet loaded any plotting system for HoloViews, and so all you can see is a description of the data stored in the elements. To be able to see a visual representation and adjust its appearance, we'll need to load a plotting system, and here let's load two so they can be compared: ``` hv.extension('bokeh', 'matplotlib') ``` Even though we can happily create, analyze, and manipulate HoloViews objects without using any plotting backend, this line is normally executed just after importing HoloViews so that objects can have a rich graphical representation rather than the very-limited textual representation shown above. Putting 'bokeh' first in this list makes visualizations default to using [Bokeh](http://bokeh.pydata.org), but including [matplotlib](http://matplotlib.org) as well means that backend can be selected for any particular plot as shown below. # Default appearance With the extension loaded, let's look at the default appearance as rendered with Bokeh: ``` curve + spikes ``` As you can see, we can immediately appreciate more about this dataset than we could from the textual representation. The curve plot, in particular, conveys clearly that the firing rate varies quite a bit over this 5-second interval. 
However, the spikes plot is much more difficult to interpret, because the plot is nearly solid black even though we already downsampled from 700 spikes to 300 spikes when we declared the element. One thing we can do is enable one of Bokeh's zoom tools and zoom in until individual spikes are clearly visible. Even then, though, it's difficult to relate the spiking and firing-rate representations to each other. Maybe we can do better by adjusting the display options away from their default settings? ## Customization Let's see what we can achieve when we do decide to customize the appearance: ``` %%output size=150 %%opts Curve [height=100 width=600 xaxis=None tools=['hover']] %%opts Curve (color='red' line_width=1.5) %%opts Spikes [height=100 width=600 yaxis=None] (color='grey' line_width=0.25) curve = hv.Curve( spike_train, 'milliseconds', 'Hertz') spikes = hv.Spikes(spike_train, 'milliseconds', []) (curve+spikes).cols(1) ``` Much better! It's the same underlying data, but now we can clearly see both the individual spike events and how they affect the moving average. You can also see how the moving average trails the actual spiking, due to how the window function was defined. A detailed breakdown of this exact customization is given in the [User Guide](../user_guide/03-Customizing_Plots.ipynb), but we can use this example to understand a number of important concepts: * The option system is based around keyword settings. * You can customize the output format using the ``%%output`` and the element appearance with the ``%%opts`` *cell magics*. * These *cell magics* affect the display output of the Jupyter cell where they are located. For use outside of the Jupyter notebook, consult the [User Guide](../user_guide/03-Customizing_Plots.ipynb) for equivalent Python-compatible syntax. * The layout container has a ``cols`` method to specify the number of columns in the layout. While the ``%%output`` cell magic accepts a simple list of keywords, we see some special syntax used in the ``%%opts`` magic: * The element type is specified following by special groups of keywords. * The keywords in square brackets ``[...]`` are ***plot options*** that instruct HoloViews how to build that type of plot. * The keywords in parentheses ``(...)`` are **style options** with keywords that are passed directly to the plotting library when rendering that type of plot. The corresponding [User Guide](../user_guide/03-Customizing_Plots.ipynb) entry explains the keywords used in detail, but a quick summary is that we have elongated the ``Curve`` and ``Scatter`` elements and toggled various axes with the ***plot options***. We have also specified the color and line widths of the [Bokeh glyphs](http://bokeh.pydata.org/en/latest/docs/user_guide/plotting.html) with the ***style options***. As you can see, these tools allow significant customization of how our elements appear. HoloViews offers many other tools for setting options either locally or globally, including the ``%output`` and ``%opts`` *line magics*, the ``.opts`` method on all HoloViews objects and the ``hv.output`` and ``hv.opts`` utilities. All these tools, how they work and details of the opts syntax can be found in the [User Guide](../user_guide/03-Customizing_Plots.ipynb). 
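For reference, the same options can also be set in pure Python, outside of the notebook magics, via the ``.opts`` method mentioned above. The exact argument format accepted by ``.opts`` has varied between HoloViews releases, so treat the snippet below (which reuses the same option-specification string syntax as the ``%%opts`` magic) as an indicative sketch rather than a guaranteed signature:

```
# Indicative sketch only: .opts with a spec string mirroring the %%opts syntax shown above.
# Depending on your HoloViews version you may need a different argument format
# (e.g. keyword arguments or a dictionary of plot/style options).
spikes.opts("Spikes [height=100 width=600 yaxis=None] (color='grey' line_width=0.25)")
```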
# Switching to matplotlib Now let's switch our backend to [matplotlib](http://matplotlib.org/) to show the same elements as rendered with different customizations, in a different output format (SVG), with a completely different plotting library: ``` %%output size=200 backend='matplotlib' fig='svg' %%opts Layout [sublabel_format='' vspace=0.1] %%opts Spikes [aspect=6 yaxis='bare'] (color='red' linewidth=0.25 ) %%opts Curve [aspect=6 xaxis=None show_grid=False] (color='blue' linewidth=2 linestyle='dashed') (hv.Curve(spike_train, 'milliseconds', 'Hertz') + hv.Spikes(spike_train, 'milliseconds', vdims=[])).cols(1) ``` Here we use the same tools with a different plotting extension. Naturally, a few changes needed to be made: * A few of the plotting options are different because of differences in how the plotting backends work. For instance, matplotlib uses ``aspect`` instead of setting ``width`` and ``height``. In some cases, but not all, HoloViews can smooth over such differences to make it simpler to switch backends. * The Bokeh hover tool is not supported by the matplotlib backend, as you might expect, nor are there any other interactive controls. * Some style options have different names; for instance, the Bokeh ``line_width`` option is called ``linewidth`` in matplotlib. * Containers like Layouts have plot options, but no style options, because they are processed by HoloViews itself. Here we adjust the gap betwen the plots using ``vspace``. Note that you can even write options that work across multiple backends, as HoloViews will ignore keywords that are not applicable to the current backend (as long as they are valid for *some* loaded backend). See the [User Guide](../user_guide/03-Customizing_Plots.ipynb) for more details. ## Persistent styles Let's switch back to the default (Bokeh) plotting extension for this notebook and apply the ``.select`` operation illustrated in the Introduction, to the ``spikes`` object we made earlier: ``` %output size=150 spikes.select(milliseconds=(2000,4000)) ``` Note how HoloViews remembered the Bokeh-specific styles we previously applied to the `spikes` object! This feature allows us to style objects once and then keep that styling as we work, without having to repeat the styles every time we work with that object. You can learn more about the output line magic and the exact semantics of the opts magic in the [User Guide](../user_guide/03-Customizing_Plots.ipynb). ## Setting axis labels If you look closely, the example above might worry you. First we defined our ``Spikes`` element with ``kdims=['milliseconds']``, which we then used as a keyword argument in ``select`` above. This is also the string used as the axis label. Does this mean we are limited to Python literals for axis labels, if we want to use the corresponding dimension with ``select``? Luckily, there is no limitation involved. Dimensions specified as strings are often convenient, but behind the scenes, HoloViews always uses a much richer ``Dimensions`` object which you can pass to the ``kdims`` and ``vdims`` explicitly (see the [User Guide](../user_guide/01-Annotating_Data.ipynb) for more information). One of the things each ``Dimension`` object supports is a long, descriptive ``label``, which complements the short programmer-friendly name. 
We can set the dimension labels on our existing ``spikes`` object as follows: ``` spikes= spikes.redim.label(milliseconds='Time in milliseconds (10⁻³ seconds)') curve = curve.redim.label(Hertz='Frequency (Hz)') (curve + spikes).select(milliseconds=(2000,4000)).cols(1) ``` As you can see, we can set long descriptive labels on our dimensions (including unicode) while still making use of the short dimension name in methods such as ``select``. Now that you know how to set up and customize basic visualizations, the next [Getting-Started sections](./3-Tabular_Datasets.ipynb) show how to work with various common types of data in HoloViews.
github_jupyter
<a href="https://colab.research.google.com/github/cxbxmxcx/EatNoEat/blob/master/Chapter_9_EatNoEat_Training.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` import tensorflow as tf import numpy as np import random import matplotlib import matplotlib.pyplot as plt import math import glob import pickle import io import os import datetime import base64 from IPython.display import HTML from IPython import display as ipythondisplay from google.colab import drive drive.mount('/content/gdrive') use_NAS = False if use_NAS: IMG_SIZE = 224 # 299 for Inception, 224 for NASNetMobile IMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3) else: IMG_SIZE = 299 # 299 for Inception, 224 for NASNetMobile IMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3) def load_image(image_path): img = tf.io.read_file(image_path) img = tf.image.decode_jpeg(img, channels=3) img = tf.image.resize(img, (IMG_SIZE, IMG_SIZE)) if use_NAS: img = tf.keras.applications.nasnet.preprocess_input(img) else: img = tf.keras.applications.inception_v3.preprocess_input(img) return img, image_path def create_model(image_batch): tf.keras.backend.clear_session() if use_NAS: # Create the base model from the pre-trained model base_model = tf.keras.applications.NASNetMobile(input_shape=IMG_SHAPE, include_top=False, weights='imagenet') else: # Create the base model from the pre-trained model base_model = tf.keras.applications.InceptionResNetV2(input_shape=IMG_SHAPE, include_top=False, weights='imagenet') feature_batch = base_model(image_batch) global_average_layer = tf.keras.layers.GlobalAveragePooling2D() feature_batch_average = global_average_layer(feature_batch) prediction_layer = tf.keras.layers.Dense(3) prediction_batch = prediction_layer(feature_batch_average) model = tf.keras.Sequential([ base_model, global_average_layer, prediction_layer]) base_learning_rate = 0.0001 model.compile(optimizer=tf.keras.optimizers.Nadam(lr=base_learning_rate), loss=tf.keras.losses.MeanAbsoluteError(), metrics=['mae', 'mse', 'accuracy']) return model import os from os import listdir my_drive = '/content/gdrive/My Drive/' image_folder = my_drive + 'TestImages/' models = my_drive + 'Models' training_folder = my_drive + "Traning/" def get_test_images(directory): images = [] for file in listdir(directory): if file.endswith(".jpg"): images.append(image_folder + file) return images images = get_test_images(image_folder) print(images) if len(images) < 0: raise Exception('Test images need to be loaded!') else: x, _ = load_image(images[0]) img = x[np.newaxis, ...] food_model = create_model(img) food_model.summary() latest = tf.train.latest_checkpoint(models) latest if latest != None: food_model.load_weights(latest) def observe_image(image, model): x, _ = load_image(image) img = x[np.newaxis, ...] return model.predict(img) import ipywidgets as widgets from IPython.display import display from IPython.display import Javascript test_states = [] #@title Eat/No Eat Training { run: "auto", vertical-output: true, display-mode: "form" } image_idx = 19 #@param {type:"slider", min:0, max:100, step:1} val = f"Images Trained {len(test_states)}" label = widgets.Label( value= val, disabled=False ) display(label) cnt = len(images) image_idx = image_idx if image_idx < cnt else cnt - 1 image = images[image_idx] x, _ = load_image(image) img = x[np.newaxis, ...] 
predict = food_model.predict(img) print(predict+5) print(image_idx,image) plt.imshow((x+1)/2) toggle = widgets.ToggleButtons( options=['Eat', 'No Eat'], disabled=False, button_style='', # 'success', 'info', 'warning', 'danger' or '' tooltip='Description', # icon='check' ) display(toggle) button = widgets.Button(description="Train!") output = widgets.Output() def button_clicked(b): # Display the message within the output widget. with output: test = (predict,toggle.index,image) test_states.append(test) button.on_click(button_clicked) display(button, output) if len(test_states) > 0: if os.path.isdir(training_folder) == False: os.makedirs(training_folder) pickle.dump( test_states, open( training_folder + "food_test.p", "wb" ) ) ```
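When you come back to this notebook in a later session, the labels saved above can be reloaded. This is a minimal sketch that simply reverses the `pickle.dump` call and assumes `food_test.p` already exists in the training folder.
```
# Reload previously saved (prediction, label, image) tuples; assumes the pickle
# file written above already exists in the training folder
label_file = training_folder + "food_test.p"
if os.path.isfile(label_file):
    with open(label_file, "rb") as f:
        test_states = pickle.load(f)
print(f"{len(test_states)} labelled images available")
```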
github_jupyter
# Recurrent Neural Networks (RNN) with Keras ## Learning Objectives 1. Add built-in RNN layers. 2. Build bidirectional RNNs. 3. Using CuDNN kernels when available. 4. Build a RNN model with nested input/output. ## Introduction Recurrent neural networks (RNN) are a class of neural networks that is powerful for modeling sequence data such as time series or natural language. Schematically, a RNN layer uses a `for` loop to iterate over the timesteps of a sequence, while maintaining an internal state that encodes information about the timesteps it has seen so far. The Keras RNN API is designed with a focus on: - **Ease of use**: the built-in `keras.layers.RNN`, `keras.layers.LSTM`, `keras.layers.GRU` layers enable you to quickly build recurrent models without having to make difficult configuration choices. - **Ease of customization**: You can also define your own RNN cell layer (the inner part of the `for` loop) with custom behavior, and use it with the generic `keras.layers.RNN` layer (the `for` loop itself). This allows you to quickly prototype different research ideas in a flexible way with minimal code. Each learning objective will correspond to a __#TODO__ in the notebook where you will complete the notebook cell's code before running. Refer to the [solution](https://github.com/GoogleCloudPlatform/training-data-analyst/blob/master/courses/machine_learning/deepdive2/text_classification/solutions/rnn.ipynb) for reference. ## Setup ``` import numpy as np import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers ``` ## Built-in RNN layers: a simple example There are three built-in RNN layers in Keras: 1. `keras.layers.SimpleRNN`, a fully-connected RNN where the output from previous timestep is to be fed to next timestep. 2. `keras.layers.GRU`, first proposed in [Cho et al., 2014](https://arxiv.org/abs/1406.1078). 3. `keras.layers.LSTM`, first proposed in [Hochreiter & Schmidhuber, 1997](https://www.bioinf.jku.at/publications/older/2604.pdf). In early 2015, Keras had the first reusable open-source Python implementations of LSTM and GRU. Here is a simple example of a `Sequential` model that processes sequences of integers, embeds each integer into a 64-dimensional vector, then processes the sequence of vectors using a `LSTM` layer. ``` model = keras.Sequential() # Add an Embedding layer expecting input vocab of size 1000, and # output embedding dimension of size 64. model.add(layers.Embedding(input_dim=1000, output_dim=64)) # Add a LSTM layer with 128 internal units. # TODO -- your code goes here # Add a Dense layer with 10 units. # TODO -- your code goes here model.summary() ``` Built-in RNNs support a number of useful features: - Recurrent dropout, via the `dropout` and `recurrent_dropout` arguments - Ability to process an input sequence in reverse, via the `go_backwards` argument - Loop unrolling (which can lead to a large speedup when processing short sequences on CPU), via the `unroll` argument - ...and more. For more information, see the [RNN API documentation](https://keras.io/api/layers/recurrent_layers/). ## Outputs and states By default, the output of a RNN layer contains a single vector per sample. This vector is the RNN cell output corresponding to the last timestep, containing information about the entire input sequence. The shape of this output is `(batch_size, units)` where `units` corresponds to the `units` argument passed to the layer's constructor. 
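For a quick check of this default behaviour, here is a small illustrative snippet (not one of the #TODO cells of this notebook):
```
# Illustrative check of the default output shape: with return_sequences left at its
# default (False), the LSTM returns a single vector per sample.
inputs = tf.random.normal([32, 10, 8])   # (batch_size, timesteps, features)
output = layers.LSTM(4)(inputs)
print(output.shape)                      # -> (32, 4), i.e. (batch_size, units)
```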
A RNN layer can also return the entire sequence of outputs for each sample (one vector per timestep per sample), if you set `return_sequences=True`. The shape of this output is `(batch_size, timesteps, units)`. ``` model = keras.Sequential() model.add(layers.Embedding(input_dim=1000, output_dim=64)) # The output of GRU will be a 3D tensor of shape (batch_size, timesteps, 256) model.add(layers.GRU(256, return_sequences=True)) # The output of SimpleRNN will be a 2D tensor of shape (batch_size, 128) model.add(layers.SimpleRNN(128)) model.add(layers.Dense(10)) model.summary() ``` In addition, a RNN layer can return its final internal state(s). The returned states can be used to resume the RNN execution later, or [to initialize another RNN](https://arxiv.org/abs/1409.3215). This setting is commonly used in the encoder-decoder sequence-to-sequence model, where the encoder final state is used as the initial state of the decoder. To configure a RNN layer to return its internal state, set the `return_state` parameter to `True` when creating the layer. Note that `LSTM` has 2 state tensors, but `GRU` only has one. To configure the initial state of the layer, just call the layer with additional keyword argument `initial_state`. Note that the shape of the state needs to match the unit size of the layer, like in the example below. ``` encoder_vocab = 1000 decoder_vocab = 2000 encoder_input = layers.Input(shape=(None,)) encoder_embedded = layers.Embedding(input_dim=encoder_vocab, output_dim=64)( encoder_input ) # Return states in addition to output output, state_h, state_c = layers.LSTM(64, return_state=True, name="encoder")( encoder_embedded ) encoder_state = [state_h, state_c] decoder_input = layers.Input(shape=(None,)) decoder_embedded = layers.Embedding(input_dim=decoder_vocab, output_dim=64)( decoder_input ) # Pass the 2 states to a new LSTM layer, as initial state decoder_output = layers.LSTM(64, name="decoder")( decoder_embedded, initial_state=encoder_state ) output = layers.Dense(10)(decoder_output) model = keras.Model([encoder_input, decoder_input], output) model.summary() ``` ## RNN layers and RNN cells In addition to the built-in RNN layers, the RNN API also provides cell-level APIs. Unlike RNN layers, which processes whole batches of input sequences, the RNN cell only processes a single timestep. The cell is the inside of the `for` loop of a RNN layer. Wrapping a cell inside a `keras.layers.RNN` layer gives you a layer capable of processing batches of sequences, e.g. `RNN(LSTMCell(10))`. Mathematically, `RNN(LSTMCell(10))` produces the same result as `LSTM(10)`. In fact, the implementation of this layer in TF v1.x was just creating the corresponding RNN cell and wrapping it in a RNN layer. However using the built-in `GRU` and `LSTM` layers enable the use of CuDNN and you may see better performance. There are three built-in RNN cells, each of them corresponding to the matching RNN layer. - `keras.layers.SimpleRNNCell` corresponds to the `SimpleRNN` layer. - `keras.layers.GRUCell` corresponds to the `GRU` layer. - `keras.layers.LSTMCell` corresponds to the `LSTM` layer. The cell abstraction, together with the generic `keras.layers.RNN` class, make it very easy to implement custom RNN architectures for your research. ## Cross-batch statefulness When processing very long sequences (possibly infinite), you may want to use the pattern of **cross-batch statefulness**. Normally, the internal state of a RNN layer is reset every time it sees a new batch (i.e. 
every sample seen by the layer is assumed to be independent of the past). The layer will only maintain a state while processing a given sample. If you have very long sequences though, it is useful to break them into shorter sequences, and to feed these shorter sequences sequentially into a RNN layer without resetting the layer's state. That way, the layer can retain information about the entirety of the sequence, even though it's only seeing one sub-sequence at a time. You can do this by setting `stateful=True` in the constructor. If you have a sequence `s = [t0, t1, ... t1546, t1547]`, you would split it into e.g. ``` s1 = [t0, t1, ... t100] s2 = [t101, ... t201] ... s16 = [t1501, ... t1547] ``` Then you would process it via: ```python lstm_layer = layers.LSTM(64, stateful=True) for s in sub_sequences: output = lstm_layer(s) ``` When you want to clear the state, you can use `layer.reset_states()`. > Note: In this setup, sample `i` in a given batch is assumed to be the continuation of sample `i` in the previous batch. This means that all batches should contain the same number of samples (batch size). E.g. if a batch contains `[sequence_A_from_t0_to_t100, sequence_B_from_t0_to_t100]`, the next batch should contain `[sequence_A_from_t101_to_t200, sequence_B_from_t101_to_t200]`. Here is a complete example: ``` paragraph1 = np.random.random((20, 10, 50)).astype(np.float32) paragraph2 = np.random.random((20, 10, 50)).astype(np.float32) paragraph3 = np.random.random((20, 10, 50)).astype(np.float32) lstm_layer = layers.LSTM(64, stateful=True) output = lstm_layer(paragraph1) output = lstm_layer(paragraph2) output = lstm_layer(paragraph3) # reset_states() will reset the cached state to the original initial_state. # If no initial_state was provided, zero-states will be used by default. # TODO -- your code goes here ``` ### RNN State Reuse <a id="rnn_state_reuse"></a> The recorded states of the RNN layer are not included in the `layer.weights()`. If you would like to reuse the state from a RNN layer, you can retrieve the states value by `layer.states` and use it as the initial state for a new layer via the Keras functional API like `new_layer(inputs, initial_state=layer.states)`, or model subclassing. Please also note that sequential model might not be used in this case since it only supports layers with single input and output, the extra input of initial state makes it impossible to use here. ``` paragraph1 = np.random.random((20, 10, 50)).astype(np.float32) paragraph2 = np.random.random((20, 10, 50)).astype(np.float32) paragraph3 = np.random.random((20, 10, 50)).astype(np.float32) lstm_layer = layers.LSTM(64, stateful=True) output = lstm_layer(paragraph1) output = lstm_layer(paragraph2) existing_state = lstm_layer.states new_lstm_layer = layers.LSTM(64) new_output = new_lstm_layer(paragraph3, initial_state=existing_state) ``` ## Bidirectional RNNs For sequences other than time series (e.g. text), it is often the case that a RNN model can perform better if it not only processes sequence from start to end, but also backwards. For example, to predict the next word in a sentence, it is often useful to have the context around the word, not only just the words that come before it. Keras provides an easy API for you to build such bidirectional RNNs: the `keras.layers.Bidirectional` wrapper. 
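Before the exercise cell below, here is an illustrative sketch of how the wrapper is typically stacked. The layer sizes and the `(5, 10)` input shape are arbitrary choices for the demo, not necessarily what the TODO cell expects:

```
# Sketch only: two stacked Bidirectional LSTMs followed by a Dense head.
sketch = keras.Sequential([
    layers.Bidirectional(layers.LSTM(64, return_sequences=True), input_shape=(5, 10)),
    layers.Bidirectional(layers.LSTM(32)),
    layers.Dense(10),
])
sketch.summary()
```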
``` model = keras.Sequential() # Add Bidirectional layers # TODO -- your code goes here model.summary() ``` Under the hood, `Bidirectional` will copy the RNN layer passed in, and flip the `go_backwards` field of the newly copied layer, so that it will process the inputs in reverse order. The output of the `Bidirectional` RNN will be, by default, the concatenation of the forward layer output and the backward layer output. If you need a different merging behavior, e.g. concatenation, change the `merge_mode` parameter in the `Bidirectional` wrapper constructor. For more details about `Bidirectional`, please check [the API docs](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Bidirectional/). ## Performance optimization and CuDNN kernels In TensorFlow 2.0, the built-in LSTM and GRU layers have been updated to leverage CuDNN kernels by default when a GPU is available. With this change, the prior `keras.layers.CuDNNLSTM/CuDNNGRU` layers have been deprecated, and you can build your model without worrying about the hardware it will run on. Since the CuDNN kernel is built with certain assumptions, this means the layer **will not be able to use the CuDNN kernel if you change the defaults of the built-in LSTM or GRU layers**. E.g.: - Changing the `activation` function from `tanh` to something else. - Changing the `recurrent_activation` function from `sigmoid` to something else. - Using `recurrent_dropout` > 0. - Setting `unroll` to True, which forces LSTM/GRU to decompose the inner `tf.while_loop` into an unrolled `for` loop. - Setting `use_bias` to False. - Using masking when the input data is not strictly right padded (if the mask corresponds to strictly right padded data, CuDNN can still be used. This is the most common case). For the detailed list of constraints, please see the documentation for the [LSTM](https://www.tensorflow.org/api_docs/python/tf/keras/layers/LSTM/) and [GRU](https://www.tensorflow.org/api_docs/python/tf/keras/layers/GRU/) layers. ### Using CuDNN kernels when available Let's build a simple LSTM model to demonstrate the performance difference. We'll use as input sequences the sequence of rows of MNIST digits (treating each row of pixels as a timestep), and we'll predict the digit's label. ``` batch_size = 64 # Each MNIST image batch is a tensor of shape (batch_size, 28, 28). # Each input sequence will be of size (28, 28) (height is treated like time). input_dim = 28 units = 64 output_size = 10 # labels are from 0 to 9 # Build the RNN model def build_model(allow_cudnn_kernel=True): # CuDNN is only available at the layer level, and not at the cell level. # This means `LSTM(units)` will use the CuDNN kernel, # while RNN(LSTMCell(units)) will run on non-CuDNN kernel. if allow_cudnn_kernel: # The LSTM layer with default options uses CuDNN. lstm_layer = keras.layers.LSTM(units, input_shape=(None, input_dim)) else: # Wrapping a LSTMCell in a RNN layer will not use CuDNN. lstm_layer = keras.layers.RNN( keras.layers.LSTMCell(units), input_shape=(None, input_dim) ) model = keras.models.Sequential( [ lstm_layer, keras.layers.BatchNormalization(), keras.layers.Dense(output_size), ] ) return model ``` Let's load the MNIST dataset: ``` mnist = keras.datasets.mnist (x_train, y_train), (x_test, y_test) = mnist.load_data() x_train, x_test = x_train / 255.0, x_test / 255.0 sample, sample_label = x_train[0], y_train[0] ``` Let's create a model instance and train it. We choose `sparse_categorical_crossentropy` as the loss function for the model. 
The output of the model has shape of `[batch_size, 10]`. The target for the model is an integer vector, each of the integer is in the range of 0 to 9. ``` model = build_model(allow_cudnn_kernel=True) # Compile the model # TODO -- your code goes here model.fit( x_train, y_train, validation_data=(x_test, y_test), batch_size=batch_size, epochs=1 ) ``` Now, let's compare to a model that does not use the CuDNN kernel: ``` noncudnn_model = build_model(allow_cudnn_kernel=False) noncudnn_model.set_weights(model.get_weights()) noncudnn_model.compile( loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), optimizer="sgd", metrics=["accuracy"], ) noncudnn_model.fit( x_train, y_train, validation_data=(x_test, y_test), batch_size=batch_size, epochs=1 ) ``` When running on a machine with a NVIDIA GPU and CuDNN installed, the model built with CuDNN is much faster to train compared to the model that uses the regular TensorFlow kernel. The same CuDNN-enabled model can also be used to run inference in a CPU-only environment. The `tf.device` annotation below is just forcing the device placement. The model will run on CPU by default if no GPU is available. You simply don't have to worry about the hardware you're running on anymore. Isn't that pretty cool? ``` import matplotlib.pyplot as plt with tf.device("CPU:0"): cpu_model = build_model(allow_cudnn_kernel=True) cpu_model.set_weights(model.get_weights()) result = tf.argmax(cpu_model.predict_on_batch(tf.expand_dims(sample, 0)), axis=1) print( "Predicted result is: %s, target result is: %s" % (result.numpy(), sample_label) ) plt.imshow(sample, cmap=plt.get_cmap("gray")) ``` ## RNNs with list/dict inputs, or nested inputs Nested structures allow implementers to include more information within a single timestep. For example, a video frame could have audio and video input at the same time. The data shape in this case could be: `[batch, timestep, {"video": [height, width, channel], "audio": [frequency]}]` In another example, handwriting data could have both coordinates x and y for the current position of the pen, as well as pressure information. So the data representation could be: `[batch, timestep, {"location": [x, y], "pressure": [force]}]` The following code provides an example of how to build a custom RNN cell that accepts such structured inputs. ### Define a custom cell that supports nested input/output See [Making new Layers & Models via subclassing](https://www.tensorflow.org/guide/keras/custom_layers_and_models/) for details on writing your own layers. 
```
class NestedCell(keras.layers.Layer):
    def __init__(self, unit_1, unit_2, unit_3, **kwargs):
        self.unit_1 = unit_1
        self.unit_2 = unit_2
        self.unit_3 = unit_3
        self.state_size = [tf.TensorShape([unit_1]), tf.TensorShape([unit_2, unit_3])]
        self.output_size = [tf.TensorShape([unit_1]), tf.TensorShape([unit_2, unit_3])]
        super(NestedCell, self).__init__(**kwargs)

    def build(self, input_shapes):
        # expect input_shape to contain 2 items, [(batch, i1), (batch, i2, i3)]
        i1 = input_shapes[0][1]
        i2 = input_shapes[1][1]
        i3 = input_shapes[1][2]

        self.kernel_1 = self.add_weight(
            shape=(i1, self.unit_1), initializer="uniform", name="kernel_1"
        )
        self.kernel_2_3 = self.add_weight(
            shape=(i2, i3, self.unit_2, self.unit_3),
            initializer="uniform",
            name="kernel_2_3",
        )

    def call(self, inputs, states):
        # inputs should be in [(batch, input_1), (batch, input_2, input_3)]
        # state should be in shape [(batch, unit_1), (batch, unit_2, unit_3)]
        input_1, input_2 = tf.nest.flatten(inputs)
        s1, s2 = states

        output_1 = tf.matmul(input_1, self.kernel_1)
        output_2_3 = tf.einsum("bij,ijkl->bkl", input_2, self.kernel_2_3)

        state_1 = s1 + output_1
        state_2_3 = s2 + output_2_3

        output = (output_1, output_2_3)
        new_states = (state_1, state_2_3)

        return output, new_states

    def get_config(self):
        # Use self.unit_2 here; a bare `unit_2` would silently pick up a global
        # variable instead of this cell's configuration.
        return {"unit_1": self.unit_1, "unit_2": self.unit_2, "unit_3": self.unit_3}
```

### Build a RNN model with nested input/output

Let's build a Keras model that uses a `keras.layers.RNN` layer and the custom cell we just defined.

```
unit_1 = 10
unit_2 = 20
unit_3 = 30

i1 = 32
i2 = 64
i3 = 32
batch_size = 64
num_batches = 10
timestep = 50

cell = NestedCell(unit_1, unit_2, unit_3)
rnn = keras.layers.RNN(cell)

input_1 = keras.Input((None, i1))
input_2 = keras.Input((None, i2, i3))

outputs = rnn((input_1, input_2))

model = keras.models.Model([input_1, input_2], outputs)

model.compile(optimizer="adam", loss="mse", metrics=["accuracy"])
```

### Train the model with randomly generated data

Since there isn't a good candidate dataset for this model, we use random NumPy data for demonstration.

```
input_1_data = np.random.random((batch_size * num_batches, timestep, i1))
input_2_data = np.random.random((batch_size * num_batches, timestep, i2, i3))
target_1_data = np.random.random((batch_size * num_batches, unit_1))
target_2_data = np.random.random((batch_size * num_batches, unit_2, unit_3))
input_data = [input_1_data, input_2_data]
target_data = [target_1_data, target_2_data]

model.fit(input_data, target_data, batch_size=batch_size)
```

With the Keras `keras.layers.RNN` layer, you are only expected to define the math logic for an individual step within the sequence, and the `keras.layers.RNN` layer will handle the sequence iteration for you. It's an incredibly powerful way to quickly prototype new kinds of RNNs (e.g. a LSTM variant).

For more details, please visit the [API docs](https://www.tensorflow.org/api_docs/python/tf/keras/layers/RNN/).
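As a final quick check (an illustrative addition, not in the original notebook), the trained nested-input model can be called on a slice of the random data generated above; the two outputs come back with the nested shapes declared by the cell.

```
# Illustrative only: predict on 4 samples and inspect the nested output shapes.
out_1, out_2_3 = model.predict([input_1_data[:4], input_2_data[:4]])
print(out_1.shape)    # expected (4, unit_1) == (4, 10)
print(out_2_3.shape)  # expected (4, unit_2, unit_3) == (4, 20, 30)
```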
github_jupyter
# Pretrained GPT2 Model Deployment Example

In this notebook, we will run an example of text generation using a GPT2 model exported from HuggingFace and deployed with Seldon's pre-packaged Triton server. The example also covers converting the model to ONNX format.

The example below implements the greedy approach for next-token prediction. More info: https://huggingface.co/transformers/model_doc/gpt2.html?highlight=gpt2

After we have the model deployed to Kubernetes, we will run a simple load test to evaluate its inference performance.

## Steps:
1. Download the pretrained GPT2 model from Hugging Face
2. Convert the model to ONNX
3. Store it in a MinIO bucket
4. Set up Seldon Core in your Kubernetes cluster
5. Deploy the ONNX model with Seldon's pre-packaged Triton server
6. Interact with the model and run a greedy algorithm example (generate a sentence completion)
7. Run a load test using vegeta
8. Clean up

## Basic requirements
* Helm v3.0.0+
* A Kubernetes cluster running v1.13 or above (minikube / docker-for-windows work well if given enough RAM)
* kubectl v1.14+
* Python 3.6+

```
%%writefile requirements.txt
transformers==4.5.1
torch==1.8.1
tokenizers<0.11,>=0.10.1
tensorflow==2.4.1
tf2onnx

!pip install --trusted-host=pypi.python.org --trusted-host=pypi.org --trusted-host=files.pythonhosted.org -r requirements.txt
```

### Export HuggingFace TFGPT2LMHeadModel pre-trained model and save it locally

```
from transformers import GPT2Tokenizer, TFGPT2LMHeadModel

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = TFGPT2LMHeadModel.from_pretrained(
    "gpt2", from_pt=True, pad_token_id=tokenizer.eos_token_id
)
model.save_pretrained("./tfgpt2model", saved_model=True)
```

### Convert the TensorFlow saved model to ONNX

```
!python -m tf2onnx.convert --saved-model ./tfgpt2model/saved_model/1 --opset 11 --output model.onnx
```

### Copy your model to a local MinIO

#### Setup MinIO

Use the provided [notebook](https://docs.seldon.io/projects/seldon-core/en/latest/examples/minio_setup.html) to install MinIO in your cluster and configure the `mc` CLI tool. Instructions are also available [online](https://docs.min.io/docs/minio-client-quickstart-guide.html).

Note: you can use your preferred remote storage (Google Cloud Storage, AWS S3, etc.).
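Optionally, before uploading the converted model in the next step, you can sanity-check that the export is well-formed. This is an illustrative addition and assumes the `onnx` Python package is installed (it is not listed in the requirements cell above).

```
# Optional, illustrative check of the exported graph.
import onnx

onnx_model = onnx.load("model.onnx")
onnx.checker.check_model(onnx_model)
print("Graph inputs:", [i.name for i in onnx_model.graph.input])
```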
#### Create a Bucket and store your model ``` !mc mb minio-seldon/onnx-gpt2 -p !mc cp ./model.onnx minio-seldon/onnx-gpt2/gpt2/1/ ``` ### Run Seldon in your kubernetes cluster Follow the [Seldon-Core Setup notebook](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html) to Setup a cluster with Ambassador Ingress or Istio and install Seldon Core ### Deploy your model with Seldon pre-packaged Triton server ``` %%writefile secret.yaml apiVersion: v1 kind: Secret metadata: name: seldon-init-container-secret type: Opaque stringData: RCLONE_CONFIG_S3_TYPE: s3 RCLONE_CONFIG_S3_PROVIDER: minio RCLONE_CONFIG_S3_ENV_AUTH: "false" RCLONE_CONFIG_S3_ACCESS_KEY_ID: minioadmin RCLONE_CONFIG_S3_SECRET_ACCESS_KEY: minioadmin RCLONE_CONFIG_S3_ENDPOINT: http://minio.minio-system.svc.cluster.local:9000 %%writefile gpt2-deploy.yaml apiVersion: machinelearning.seldon.io/v1alpha2 kind: SeldonDeployment metadata: name: gpt2 spec: predictors: - graph: implementation: TRITON_SERVER logger: mode: all modelUri: s3://onnx-gpt2 envSecretRefName: seldon-init-container-secret name: gpt2 type: MODEL name: default replicas: 1 protocol: kfserving !kubectl apply -f secret.yaml -n default !kubectl apply -f gpt2-deploy.yaml -n default !kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=gpt2 -o jsonpath='{.items[0].metadata.name}') ``` #### Interact with the model: get model metadata (a "test" request to make sure our model is available and loaded correctly) ``` !curl -v http://localhost:80/seldon/default/gpt2/v2/models/gpt2 ``` ### Run prediction test: generate a sentence completion using GPT2 model - Greedy approach ``` import json import numpy as np import requests from transformers import GPT2Tokenizer tokenizer = GPT2Tokenizer.from_pretrained("gpt2") input_text = "I enjoy working in Seldon" count = 0 max_gen_len = 10 gen_sentence = input_text while count < max_gen_len: input_ids = tokenizer.encode(gen_sentence, return_tensors="tf") shape = input_ids.shape.as_list() payload = { "inputs": [ { "name": "input_ids:0", "datatype": "INT32", "shape": shape, "data": input_ids.numpy().tolist(), }, { "name": "attention_mask:0", "datatype": "INT32", "shape": shape, "data": np.ones(shape, dtype=np.int32).tolist(), }, ] } ret = requests.post( "http://localhost:80/seldon/default/gpt2/v2/models/gpt2/infer", json=payload ) try: res = ret.json() except: continue # extract logits logits = np.array(res["outputs"][1]["data"]) logits = logits.reshape(res["outputs"][1]["shape"]) # take the best next token probability of the last token of input ( greedy approach) next_token = logits.argmax(axis=2)[0] next_token_str = tokenizer.decode( next_token[-1:], skip_special_tokens=True, clean_up_tokenization_spaces=True ).strip() gen_sentence += " " + next_token_str count += 1 print(f"Input: {input_text}\nOutput: {gen_sentence}") ``` ### Run Load Test / Performance Test using vegeta #### Install vegeta, for more details take a look in [vegeta](https://github.com/tsenart/vegeta#install) official documentation ``` !wget https://github.com/tsenart/vegeta/releases/download/v12.8.3/vegeta-12.8.3-linux-amd64.tar.gz !tar -zxvf vegeta-12.8.3-linux-amd64.tar.gz !chmod +x vegeta ``` #### Generate vegeta [target file](https://github.com/tsenart/vegeta#-targets) contains "post" cmd with payload in the requiered structure ``` import base64 import json from subprocess import PIPE, Popen, run import numpy as np from transformers import GPT2Tokenizer, TFGPT2LMHeadModel tokenizer = 
GPT2Tokenizer.from_pretrained("gpt2") input_text = "I enjoy working in Seldon" input_ids = tokenizer.encode(input_text, return_tensors="tf") shape = input_ids.shape.as_list() payload = { "inputs": [ { "name": "input_ids:0", "datatype": "INT32", "shape": shape, "data": input_ids.numpy().tolist(), }, { "name": "attention_mask:0", "datatype": "INT32", "shape": shape, "data": np.ones(shape, dtype=np.int32).tolist(), }, ] } cmd = { "method": "POST", "header": {"Content-Type": ["application/json"]}, "url": "http://localhost:80/seldon/default/gpt2/v2/models/gpt2/infer", "body": base64.b64encode(bytes(json.dumps(payload), "utf-8")).decode("utf-8"), } with open("vegeta_target.json", mode="w") as file: json.dump(cmd, file) file.write("\n\n") !vegeta attack -targets=vegeta_target.json -rate=1 -duration=60s -format=json | vegeta report -type=text ``` ### Clean-up ``` !kubectl delete -f gpt2-deploy.yaml -n default ```
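If you created the `seldon-init-container-secret` earlier, you may also want to remove it (an optional extra step, not in the original notebook):

```
!kubectl delete -f secret.yaml -n default
```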
github_jupyter
# Naive forecasting ## Setup ``` import numpy as np import matplotlib.pyplot as plt def plot_series(time, series, format="-", start=0, end=None, label=None): plt.plot(time[start:end], series[start:end], format, label=label) plt.xlabel("Time") plt.ylabel("Value") if label: plt.legend(fontsize=14) plt.grid(True) def trend(time, slope=0): return slope * time def seasonal_pattern(season_time): """Just an arbitrary pattern, you can change it if you wish""" return np.where(season_time < 0.4, np.cos(season_time * 2 * np.pi), 1 / np.exp(3 * season_time)) def seasonality(time, period, amplitude=1, phase=0): """Repeats the same pattern at each period""" season_time = ((time + phase) % period) / period return amplitude * seasonal_pattern(season_time) def white_noise(time, noise_level=1, seed=None): rnd = np.random.RandomState(seed) return rnd.randn(len(time)) * noise_level ``` ## Trend and Seasonality ``` time = np.arange(4 * 365 + 1) slope = 0.05 baseline = 10 amplitude = 40 series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude) noise_level = 5 noise = white_noise(time, noise_level, seed=42) series += noise plt.figure(figsize=(10, 6)) plot_series(time, series) plt.show() ``` All right, this looks realistic enough for now. Let's try to forecast it. We will split it into two periods: the training period and the validation period (in many cases, you would also want to have a test period). The split will be at time step 1000. ``` split_time = 1000 time_train = time[:split_time] x_train = series[:split_time] time_valid = time[split_time:] x_valid = series[split_time:] ``` ## Naive Forecast ``` naive_forecast = series[split_time - 1:-1] plt.figure(figsize=(10, 6)) plot_series(time_valid, x_valid, label="Series") plot_series(time_valid, naive_forecast, label="Forecast") ``` Let's zoom in on the start of the validation period: ``` plt.figure(figsize=(10, 6)) plot_series(time_valid, x_valid, start=0, end=150, label="Series") plot_series(time_valid, naive_forecast, start=1, end=151, label="Forecast") ``` You can see that the naive forecast lags 1 step behind the time series. Now let's compute the mean absolute error between the forecasts and the predictions in the validation period: ``` errors = naive_forecast - x_valid abs_errors = np.abs(errors) mae = abs_errors.mean() mae ``` That's our baseline, now let's try a moving average.
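As a sketch of that next step (an illustrative addition; the 30-day window is an arbitrary choice), one common way to implement a moving-average forecast looks like this:

```
def moving_average_forecast(series, window_size):
    """Forecasts the mean of the last few values.
    If window_size=1, this reduces to the naive forecast."""
    forecast = []
    for time in range(len(series) - window_size):
        forecast.append(series[time:time + window_size].mean())
    return np.array(forecast)

moving_avg = moving_average_forecast(series, 30)[split_time - 30:]

plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid, label="Series")
plot_series(time_valid, moving_avg, label="Moving average (30 days)")

np.mean(np.abs(moving_avg - x_valid))
```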
github_jupyter
##### Copyright 2020 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # TF Lattice 사전 제작 모델 <table class="tfo-notebook-buttons" align="left"> <td><a target="_blank" href="https://www.tensorflow.org/lattice/tutorials/premade_models"><img src="https://www.tensorflow.org/images/tf_logo_32px.png">TensorFlow.org에서 보기</a></td> <td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/lattice/tutorials/premade_models.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png">Google Colab에서 실행하기</a></td> <td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ko/lattice/tutorials/premade_models.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">GitHub에서 소스 보기</a></td> <td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ko/lattice/tutorials/premade_models.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">노트북 다운로드하기</a></td> </table> ## 개요 사전 제작된 모델은 일반적인 사용 사례를 위해 TFL `tf.keras.model` 인스턴스를 구축하는 빠르고 쉬운 방법입니다. 이 가이드에서는 TFL 사전 제작 모델을 구성하고 훈련/테스트하는 데 필요한 단계를 설명합니다. ## 설정 TF Lattice 패키지 설치하기 ``` #@test {"skip": true} !pip install tensorflow-lattice pydot ``` 필수 패키지 가져오기 ``` import tensorflow as tf import copy import logging import numpy as np import pandas as pd import sys import tensorflow_lattice as tfl logging.disable(sys.maxsize) ``` UCI Statlog(Heart) 데이터세트 다운로드하기 ``` csv_file = tf.keras.utils.get_file( 'heart.csv', 'http://storage.googleapis.com/download.tensorflow.org/data/heart.csv') df = pd.read_csv(csv_file) train_size = int(len(df) * 0.8) train_dataframe = df[:train_size] test_dataframe = df[train_size:] df.head() ``` 특성과 레이블을 추출하고 텐서로 변환합니다. ``` # Features: # - age # - sex # - cp chest pain type (4 values) # - trestbps resting blood pressure # - chol serum cholestoral in mg/dl # - fbs fasting blood sugar > 120 mg/dl # - restecg resting electrocardiographic results (values 0,1,2) # - thalach maximum heart rate achieved # - exang exercise induced angina # - oldpeak ST depression induced by exercise relative to rest # - slope the slope of the peak exercise ST segment # - ca number of major vessels (0-3) colored by flourosopy # - thal 3 = normal; 6 = fixed defect; 7 = reversable defect # # This ordering of feature names will be the exact same order that we construct # our model to expect. feature_names = [ 'age', 'sex', 'cp', 'chol', 'fbs', 'trestbps', 'thalach', 'restecg', 'exang', 'oldpeak', 'slope', 'ca', 'thal' ] feature_name_indices = {name: index for index, name in enumerate(feature_names)} # This is the vocab list and mapping we will use for the 'thal' categorical # feature. thal_vocab_list = ['normal', 'fixed', 'reversible'] thal_map = {category: i for i, category in enumerate(thal_vocab_list)} # Custom function for converting thal categories to buckets def convert_thal_features(thal_features): # Note that two examples in the test set are already converted. 
return np.array([ thal_map[feature] if feature in thal_vocab_list else feature for feature in thal_features ]) # Custom function for extracting each feature. def extract_features(dataframe, label_name='target', feature_names=feature_names): features = [] for feature_name in feature_names: if feature_name == 'thal': features.append( convert_thal_features(dataframe[feature_name].values).astype(float)) else: features.append(dataframe[feature_name].values.astype(float)) labels = dataframe[label_name].values.astype(float) return features, labels train_xs, train_ys = extract_features(train_dataframe) test_xs, test_ys = extract_features(test_dataframe) # Let's define our label minimum and maximum. min_label, max_label = float(np.min(train_ys)), float(np.max(train_ys)) # Our lattice models may have predictions above 1.0 due to numerical errors. # We can subtract this small epsilon value from our output_max to make sure we # do not predict values outside of our label bound. numerical_error_epsilon = 1e-5 ``` 이 가이드에서 훈련에 사용되는 기본값 설정하기 ``` LEARNING_RATE = 0.01 BATCH_SIZE = 128 NUM_EPOCHS = 500 PREFITTING_NUM_EPOCHS = 10 ``` ## 특성 구성 특성 보정 및 특성별 구성은 [tfl.configs.FeatureConfig](https://www.tensorflow.org/lattice/api_docs/python/tfl/configs/FeatureConfig)를 사용하여 설정됩니다. 특성 구성에는 단조 제약 조건, 특성별 정규화([tfl.configs.RegularizerConfig](https://www.tensorflow.org/lattice/api_docs/python/tfl/configs/RegularizerConfig) 참조) 및 격자 모델에 대한 격자 크기가 포함됩니다. 모델이 인식해야 할 모든 특성에 대한 특성 구성을 완전하게 지정해야합니다. 그렇지 않으면 모델은 이러한 특성이 존재하는지 알 수 없습니다. ### 분위수 계산하기 `tfl.configs.FeatureConfig`에서 `pwl_calibration_input_keypoints`의 기본 설정은 'quantiles'이지만 사전 제작된 모델의 경우 입력 키포인트를 수동으로 정의해야 합니다. 이를 위해 먼저 분위수 계산을 위한 자체 도우미 함수를 정의합니다. ``` def compute_quantiles(features, num_keypoints=10, clip_min=None, clip_max=None, missing_value=None): # Clip min and max if desired. if clip_min is not None: features = np.maximum(features, clip_min) features = np.append(features, clip_min) if clip_max is not None: features = np.minimum(features, clip_max) features = np.append(features, clip_max) # Make features unique. unique_features = np.unique(features) # Remove missing values if specified. if missing_value is not None: unique_features = np.delete(unique_features, np.where(unique_features == missing_value)) # Compute and return quantiles over unique non-missing feature values. return np.quantile( unique_features, np.linspace(0., 1., num=num_keypoints), interpolation='nearest').astype(float) ``` ### 특성 구성 정의하기 이제 분위수를 계산할 수 있으므로 모델이 입력으로 사용하기 원하는 각 특성에 대한 특성 구성을 정의합니다. ``` # Feature configs are used to specify how each feature is calibrated and used. feature_configs = [ tfl.configs.FeatureConfig( name='age', lattice_size=3, monotonicity='increasing', # We must set the keypoints manually. pwl_calibration_num_keypoints=5, pwl_calibration_input_keypoints=compute_quantiles( train_xs[feature_name_indices['age']], num_keypoints=5, clip_max=100), # Per feature regularization. regularizer_configs=[ tfl.configs.RegularizerConfig(name='calib_wrinkle', l2=0.1), ], ), tfl.configs.FeatureConfig( name='sex', num_buckets=2, ), tfl.configs.FeatureConfig( name='cp', monotonicity='increasing', # Keypoints that are uniformly spaced. pwl_calibration_num_keypoints=4, pwl_calibration_input_keypoints=np.linspace( np.min(train_xs[feature_name_indices['cp']]), np.max(train_xs[feature_name_indices['cp']]), num=4), ), tfl.configs.FeatureConfig( name='chol', monotonicity='increasing', # Explicit input keypoints initialization. 
pwl_calibration_input_keypoints=[126.0, 210.0, 247.0, 286.0, 564.0], # Calibration can be forced to span the full output range by clamping. pwl_calibration_clamp_min=True, pwl_calibration_clamp_max=True, # Per feature regularization. regularizer_configs=[ tfl.configs.RegularizerConfig(name='calib_hessian', l2=1e-4), ], ), tfl.configs.FeatureConfig( name='fbs', # Partial monotonicity: output(0) <= output(1) monotonicity=[(0, 1)], num_buckets=2, ), tfl.configs.FeatureConfig( name='trestbps', monotonicity='decreasing', pwl_calibration_num_keypoints=5, pwl_calibration_input_keypoints=compute_quantiles( train_xs[feature_name_indices['trestbps']], num_keypoints=5), ), tfl.configs.FeatureConfig( name='thalach', monotonicity='decreasing', pwl_calibration_num_keypoints=5, pwl_calibration_input_keypoints=compute_quantiles( train_xs[feature_name_indices['thalach']], num_keypoints=5), ), tfl.configs.FeatureConfig( name='restecg', # Partial monotonicity: output(0) <= output(1), output(0) <= output(2) monotonicity=[(0, 1), (0, 2)], num_buckets=3, ), tfl.configs.FeatureConfig( name='exang', # Partial monotonicity: output(0) <= output(1) monotonicity=[(0, 1)], num_buckets=2, ), tfl.configs.FeatureConfig( name='oldpeak', monotonicity='increasing', pwl_calibration_num_keypoints=5, pwl_calibration_input_keypoints=compute_quantiles( train_xs[feature_name_indices['oldpeak']], num_keypoints=5), ), tfl.configs.FeatureConfig( name='slope', # Partial monotonicity: output(0) <= output(1), output(1) <= output(2) monotonicity=[(0, 1), (1, 2)], num_buckets=3, ), tfl.configs.FeatureConfig( name='ca', monotonicity='increasing', pwl_calibration_num_keypoints=4, pwl_calibration_input_keypoints=compute_quantiles( train_xs[feature_name_indices['ca']], num_keypoints=4), ), tfl.configs.FeatureConfig( name='thal', # Partial monotonicity: # output(normal) <= output(fixed) # output(normal) <= output(reversible) monotonicity=[('normal', 'fixed'), ('normal', 'reversible')], num_buckets=3, # We must specify the vocabulary list in order to later set the # monotonicities since we used names and not indices. vocabulary_list=thal_vocab_list, ), ] ``` 다음으로 사용자 정의 어휘(위의 'thal'과 같은)를 사용한 특성에 대해 단조를 올바르게 설정해야합니다. ``` tfl.premade_lib.set_categorical_monotonicities(feature_configs) ``` ## 보정된 선형 모델 TFL 사전 제작 모델을 구성하려면 먼저 [tfl.configs](https://www.tensorflow.org/lattice/api_docs/python/tfl/configs)에서 모델 구성을 갖추세요. 보정된 선형 모델은 [tfl.configs.CalibratedLinearConfig](https://www.tensorflow.org/lattice/api_docs/python/tfl/configs/CalibratedLinearConfig)를 사용하여 구성됩니다. 입력 특성에 구간 선형 및 범주형 보정을 적용한 다음 선형 조합 및 선택적 출력 구간 선형 보정을 적용합니다. 출력 보정을 사용하거나 출력 경계가 지정된 경우 선형 레이어는 보정된 입력에 가중치 평균을 적용합니다. 이 예제는 처음 5개 특성에 대해 보정된 선형 모델을 만듭니다. ``` # Model config defines the model structure for the premade model. linear_model_config = tfl.configs.CalibratedLinearConfig( feature_configs=feature_configs[:5], use_bias=True, # We must set the output min and max to that of the label. output_min=min_label, output_max=max_label, output_calibration=True, output_calibration_num_keypoints=10, output_initialization=np.linspace(min_label, max_label, num=10), regularizer_configs=[ # Regularizer for the output calibrator. tfl.configs.RegularizerConfig(name='output_calib_hessian', l2=1e-4), ]) # A CalibratedLinear premade model constructed from the given model config. linear_model = tfl.premade.CalibratedLinear(linear_model_config) # Let's plot our model. 
tf.keras.utils.plot_model(linear_model, show_layer_names=False, rankdir='LR') ``` 이제 다른 [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model)과 마찬가지로 모델을 데이터에 맞게 컴파일하고 적합하도록 맞춥니다. ``` linear_model.compile( loss=tf.keras.losses.BinaryCrossentropy(), metrics=[tf.keras.metrics.AUC()], optimizer=tf.keras.optimizers.Adam(LEARNING_RATE)) linear_model.fit( train_xs[:5], train_ys, epochs=NUM_EPOCHS, batch_size=BATCH_SIZE, verbose=False) ``` 모델을 훈련한 후 테스트세트에서 평가할 수 있습니다. ``` print('Test Set Evaluation...') print(linear_model.evaluate(test_xs[:5], test_ys)) ``` ## 보정된 격자 모델 보정된 격자 모델은 [tfl.configs.CalibratedLatticeConfig](https://www.tensorflow.org/lattice/api_docs/python/tfl/configs/CalibratedLatticeConfig)를 사용하여 구성됩니다. 보정된 격자 모델은 입력 특성에 구간별 선형 및 범주형 보정을 적용한 다음 격자 모델 및 선택적 출력 구간별 선형 보정을 적용합니다. 이 예제에서는 처음 5개의 특성에 대해 보정된 격자 모델을 만듭니다. ``` # This is a calibrated lattice model: inputs are calibrated, then combined # non-linearly using a lattice layer. lattice_model_config = tfl.configs.CalibratedLatticeConfig( feature_configs=feature_configs[:5], output_min=min_label, output_max=max_label - numerical_error_epsilon, output_initialization=[min_label, max_label], regularizer_configs=[ # Torsion regularizer applied to the lattice to make it more linear. tfl.configs.RegularizerConfig(name='torsion', l2=1e-2), # Globally defined calibration regularizer is applied to all features. tfl.configs.RegularizerConfig(name='calib_hessian', l2=1e-2), ]) # A CalibratedLattice premade model constructed from the given model config. lattice_model = tfl.premade.CalibratedLattice(lattice_model_config) # Let's plot our model. tf.keras.utils.plot_model(lattice_model, show_layer_names=False, rankdir='LR') ``` 이전과 마찬가지로 모델을 컴파일하고 적합하도록 맞추고 평가합니다. ``` lattice_model.compile( loss=tf.keras.losses.BinaryCrossentropy(), metrics=[tf.keras.metrics.AUC()], optimizer=tf.keras.optimizers.Adam(LEARNING_RATE)) lattice_model.fit( train_xs[:5], train_ys, epochs=NUM_EPOCHS, batch_size=BATCH_SIZE, verbose=False) print('Test Set Evaluation...') print(lattice_model.evaluate(test_xs[:5], test_ys)) ``` ## 보정된 격자 앙상블 모델 특성 수가 많으면 앙상블 모델을 사용할 수 있습니다.이 모델은 특성의 하위 집합에 대해 여러 개의 작은 격자를 만들고, 하나의 거대한 격자를 만드는 대신 출력을 평균화합니다. 앙상블 격자 모델은 [tfl.configs.CalibratedLatticeEnsembleConfig](https://www.tensorflow.org/lattice/api_docs/python/tfl/configs/CalibratedLatticeEnsembleConfig)를 사용하여 구성됩니다. 보정된 격자 앙상블 모델은 입력 특성에 구간별 선형 및 범주형 보정을 적용한 다음 격자 모델 앙상블과 선택적 출력 구간별 선형 보정을 적용합니다. ### 명시적 격자 앙상블 초기화 격자에 공급할 특성의 하위 집합을 이미 알고 있는 경우 특성 이름을 사용하여 격자를 명시적으로 설정할 수 있습니다. 이 예제에서는 5개의 격자와 격자당 3개의 특성이 있는 보정된 격자 앙상블 모델을 만듭니다. ``` # This is a calibrated lattice ensemble model: inputs are calibrated, then # combined non-linearly and averaged using multiple lattice layers. explicit_ensemble_model_config = tfl.configs.CalibratedLatticeEnsembleConfig( feature_configs=feature_configs, lattices=[['trestbps', 'chol', 'ca'], ['fbs', 'restecg', 'thal'], ['fbs', 'cp', 'oldpeak'], ['exang', 'slope', 'thalach'], ['restecg', 'age', 'sex']], num_lattices=5, lattice_rank=3, output_min=min_label, output_max=max_label - numerical_error_epsilon, output_initialization=[min_label, max_label]) # A CalibratedLatticeEnsemble premade model constructed from the given # model config. explicit_ensemble_model = tfl.premade.CalibratedLatticeEnsemble( explicit_ensemble_model_config) # Let's plot our model. tf.keras.utils.plot_model( explicit_ensemble_model, show_layer_names=False, rankdir='LR') ``` 이전과 마찬가지로 모델을 컴파일하고 적합하도록 맞추고 평가합니다. 
``` explicit_ensemble_model.compile( loss=tf.keras.losses.BinaryCrossentropy(), metrics=[tf.keras.metrics.AUC()], optimizer=tf.keras.optimizers.Adam(LEARNING_RATE)) explicit_ensemble_model.fit( train_xs, train_ys, epochs=NUM_EPOCHS, batch_size=BATCH_SIZE, verbose=False) print('Test Set Evaluation...') print(explicit_ensemble_model.evaluate(test_xs, test_ys)) ``` ### 무작위 격자 앙상블 격자에 어떤 특성의 하위 집합을 제공할지 확실하지 않은 경우 각 격자에 대해 무작위의 특성 하위 집합을 사용해보는 옵션이 있습니다. 이 예제에서는 5개의 격자와 격자당 3개의 특성이 있는 보정된 격자 앙상블 모델을 만듭니다. ``` # This is a calibrated lattice ensemble model: inputs are calibrated, then # combined non-linearly and averaged using multiple lattice layers. random_ensemble_model_config = tfl.configs.CalibratedLatticeEnsembleConfig( feature_configs=feature_configs, lattices='random', num_lattices=5, lattice_rank=3, output_min=min_label, output_max=max_label - numerical_error_epsilon, output_initialization=[min_label, max_label], random_seed=42) # Now we must set the random lattice structure and construct the model. tfl.premade_lib.set_random_lattice_ensemble(random_ensemble_model_config) # A CalibratedLatticeEnsemble premade model constructed from the given # model config. random_ensemble_model = tfl.premade.CalibratedLatticeEnsemble( random_ensemble_model_config) # Let's plot our model. tf.keras.utils.plot_model( random_ensemble_model, show_layer_names=False, rankdir='LR') ``` 이전과 마찬가지로 모델을 컴파일하고 적합하도록 맞추고 평가합니다. ``` random_ensemble_model.compile( loss=tf.keras.losses.BinaryCrossentropy(), metrics=[tf.keras.metrics.AUC()], optimizer=tf.keras.optimizers.Adam(LEARNING_RATE)) random_ensemble_model.fit( train_xs, train_ys, epochs=NUM_EPOCHS, batch_size=BATCH_SIZE, verbose=False) print('Test Set Evaluation...') print(random_ensemble_model.evaluate(test_xs, test_ys)) ``` ### RTL 레이어 무작위 격자 앙상블 무작위 격자 앙상블을 사용하는 경우 모델이 단일 `tfl.layers.RTL` 레이어를 사용하도록 지정할 수 있습니다. `tfl.layers.RTL`은 단조 제약 조건만 지원하며 모든 특성에 대해 같은 격자 크기를 가져야 하고 특성별 정규화가 없어야 합니다. `tfl.layers.RTL` 레이어를 사용하면 별도의 `tfl.layers.Lattice` 인스턴스를 사용하는 것보다 훨씬 더 큰 앙상블로 확장할 수 있습니다. 이 예제에서는 5개의 격자와 격자당 3개의 특성이 있는 보정된 격자 앙상블 모델을 만듭니다. ``` # Make sure our feature configs have the same lattice size, no per-feature # regularization, and only monotonicity constraints. rtl_layer_feature_configs = copy.deepcopy(feature_configs) for feature_config in rtl_layer_feature_configs: feature_config.lattice_size = 2 feature_config.unimodality = 'none' feature_config.reflects_trust_in = None feature_config.dominates = None feature_config.regularizer_configs = None # This is a calibrated lattice ensemble model: inputs are calibrated, then # combined non-linearly and averaged using multiple lattice layers. rtl_layer_ensemble_model_config = tfl.configs.CalibratedLatticeEnsembleConfig( feature_configs=rtl_layer_feature_configs, lattices='rtl_layer', num_lattices=5, lattice_rank=3, output_min=min_label, output_max=max_label - numerical_error_epsilon, output_initialization=[min_label, max_label], random_seed=42) # A CalibratedLatticeEnsemble premade model constructed from the given # model config. Note that we do not have to specify the lattices by calling # a helper function (like before with random) because the RTL Layer will take # care of that for us. rtl_layer_ensemble_model = tfl.premade.CalibratedLatticeEnsemble( rtl_layer_ensemble_model_config) # Let's plot our model. tf.keras.utils.plot_model( rtl_layer_ensemble_model, show_layer_names=False, rankdir='LR') ``` 이전과 마찬가지로 모델을 컴파일하고 적합하도록 맞추고 평가합니다. 
``` rtl_layer_ensemble_model.compile( loss=tf.keras.losses.BinaryCrossentropy(), metrics=[tf.keras.metrics.AUC()], optimizer=tf.keras.optimizers.Adam(LEARNING_RATE)) rtl_layer_ensemble_model.fit( train_xs, train_ys, epochs=NUM_EPOCHS, batch_size=BATCH_SIZE, verbose=False) print('Test Set Evaluation...') print(rtl_layer_ensemble_model.evaluate(test_xs, test_ys)) ``` ### Crystal 격자 앙상블 사전 제작은 또한 [Crystal](https://papers.nips.cc/paper/6377-fast-and-flexible-monotonic-functions-with-ensembles-of-lattices) 이라는 휴리스틱 특성 배열 알고리즘을 제공합니다. Crystal 알고리즘을 사용하기 위해 먼저 쌍별 특성 상호 작용을 추정하는 사전 적합 모델을 훈련합니다. 그런 다음 더 많은 비선형 상호 작용이 있는 특성이 같은 격자에 있도록 최종 앙상블을 배열합니다. 사전 제작 라이브러리는 사전 적합 모델 구성을 구성하고 결정 구조를 추출하기 위한 도우미 함수를 제공합니다. 사전 적합 모델은 완전하게 훈련될 필요가 없으므로 몇 번의 epoch면 충분합니다. 이 예제에서는 5개의 격자와 격자당 3개의 특성이 있는 보정된 격자 앙상블 모델을 만듭니다. ``` # This is a calibrated lattice ensemble model: inputs are calibrated, then # combines non-linearly and averaged using multiple lattice layers. crystals_ensemble_model_config = tfl.configs.CalibratedLatticeEnsembleConfig( feature_configs=feature_configs, lattices='crystals', num_lattices=5, lattice_rank=3, output_min=min_label, output_max=max_label - numerical_error_epsilon, output_initialization=[min_label, max_label], random_seed=42) # Now that we have our model config, we can construct a prefitting model config. prefitting_model_config = tfl.premade_lib.construct_prefitting_model_config( crystals_ensemble_model_config) # A CalibratedLatticeEnsemble premade model constructed from the given # prefitting model config. prefitting_model = tfl.premade.CalibratedLatticeEnsemble( prefitting_model_config) # We can compile and train our prefitting model as we like. prefitting_model.compile( loss=tf.keras.losses.BinaryCrossentropy(), optimizer=tf.keras.optimizers.Adam(LEARNING_RATE)) prefitting_model.fit( train_xs, train_ys, epochs=PREFITTING_NUM_EPOCHS, batch_size=BATCH_SIZE, verbose=False) # Now that we have our trained prefitting model, we can extract the crystals. tfl.premade_lib.set_crystals_lattice_ensemble(crystals_ensemble_model_config, prefitting_model_config, prefitting_model) # A CalibratedLatticeEnsemble premade model constructed from the given # model config. crystals_ensemble_model = tfl.premade.CalibratedLatticeEnsemble( crystals_ensemble_model_config) # Let's plot our model. tf.keras.utils.plot_model( crystals_ensemble_model, show_layer_names=False, rankdir='LR') ``` 이전과 마찬가지로 모델을 컴파일하고 적합하도록 맞추고 평가합니다. ``` crystals_ensemble_model.compile( loss=tf.keras.losses.BinaryCrossentropy(), metrics=[tf.keras.metrics.AUC()], optimizer=tf.keras.optimizers.Adam(LEARNING_RATE)) crystals_ensemble_model.fit( train_xs, train_ys, epochs=NUM_EPOCHS, batch_size=BATCH_SIZE, verbose=False) print('Test Set Evaluation...') print(crystals_ensemble_model.evaluate(test_xs, test_ys)) ```
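As an optional wrap-up (an illustrative addition, not part of the original tutorial), the evaluations already run above can be gathered into a single side-by-side summary. Note that the calibrated linear and calibrated lattice models were trained on only the first 5 features, so they are evaluated on `test_xs[:5]`.

```
# Illustrative summary of the test metrics of the premade models trained above.
results = {
    'calibrated linear': linear_model.evaluate(test_xs[:5], test_ys, verbose=0),
    'calibrated lattice': lattice_model.evaluate(test_xs[:5], test_ys, verbose=0),
    'explicit ensemble': explicit_ensemble_model.evaluate(test_xs, test_ys, verbose=0),
    'random ensemble': random_ensemble_model.evaluate(test_xs, test_ys, verbose=0),
    'RTL layer ensemble': rtl_layer_ensemble_model.evaluate(test_xs, test_ys, verbose=0),
    'crystals ensemble': crystals_ensemble_model.evaluate(test_xs, test_ys, verbose=0),
}
for name, (loss, auc) in results.items():
    print('%20s  loss=%.4f  AUC=%.4f' % (name, loss, auc))
```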
github_jupyter
<a href="https://colab.research.google.com/github/claytonchagas/intpy_prod/blob/main/9_4_automatic_evaluation_dataone_Digital_RADs_ast_only_files.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` !sudo apt-get update !sudo apt-get install python3.9 !python3.9 -V !which python3.9 ``` #**i. Colab hardware and software specs:** - n1-highmem-2 instance - 2vCPU @ 2.3GHz - 13GB RAM - 100GB Free Space - idle cut-off 90 minutes - maximum lifetime 12 hours ``` # Colab hardware info (processor and memory): # !cat /proc/cpuinfo # !cat /proc/memoinfo # !lscpu !lscpu | egrep 'Model name|Socket|Thread|NUMA|CPU\(s\)' print("---------------------------------") !free -m # Colab SO structure and version !ls -a print("---------------------------------") !ls -l / print("---------------------------------") !lsb_release -a ``` #**ii. Cloning IntPy repository:** - https://github.com/claytonchagas/intpy_dev.git ``` !git clone https://github.com/claytonchagas/intpy_dev.git !ls -a print("---------------------------------") %cd intpy_dev/ !git checkout 7b2fe6c !ls -a print("---------------------------------") !git branch print("---------------------------------") #!git log --pretty=oneline --abbrev-commit #!git log --all --decorate --oneline --graph ``` #**iii. dataone_Digital_RADs experiments' evolutions and cutoff by approach** - This evaluation does not make sense as the simulation parameters are fixed. #**iv. dataone_Digital_RADs distribution experiments', three mixed trials** - This evaluation does not make sense as the simulation parameters are fixed. #**1. Fast execution, all versions (v0.1.x and from v0.2.1.x to v0.2.7.x)** ##**1.1 Fast execution: only intra-cache** ###**1.1.1 Fast execution: only intra-cache => experiment's executions** ``` !cd Digital_RADs;\ rm -rf .intpy;\ echo "IntPy only intra-cache";\ experimento=Digital_RADs.py;\ echo "Experiment: $experimento";\ for i in "--no-cache" "v01x" "v021x" "v022x" "v023x" "v024x" "v025x" "v026x" "v027x";\ do rm -rf output_intra_$i.dat;\ rm -rf .intpy;\ echo "---------------------------------";\ echo "IntPy version $i";\ for j in {1..5};\ do echo "Execution $j";\ rm -rf .intpy;\ if [ "$i" = "--no-cache" ]; then python3.9 $experimento NC_005213.fasta out.fasta 1 GAATC 2 $i >> output_intra_$i.dat;\ else python3.9 $experimento NC_005213.fasta out.fasta 1 GAATC 2 -v $i >> output_intra_$i.dat;\ fi;\ echo "Done execution $j";\ done;\ echo "Done IntPy distribution version $i";\ done;\ !ls -a %cd Digital_RADs/ !ls -a !echo "Statistics evaluation:";\ rm -rf stats_intra.dat;\ for k in "--no-cache" "v01x" "v021x" "v022x" "v023x" "v024x" "v025x" "v026x" "v027x";\ do echo "Statistics version $k" >> stats_intra.dat;\ echo "Statistics version $k";\ python3.9 stats_colab_digi_rads.py output_intra_$k.dat;\ python3.9 stats_colab_digi_rads.py output_intra_$k.dat >> stats_intra.dat;\ echo "---------------------------------";\ done;\ ``` ###**1.1.2 Fast execution: only intra-cache => charts generation** ``` %matplotlib inline import matplotlib.pyplot as plt versions = ['--no-cache', 'v01x', 'v021x', 'v022x', 'v023x', 'v024x', 'v025x', 'v026x', 'v027x'] colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:purple', 'tab:grey', 'tab:olive', 'tab:cyan', 'tab:brown', 'tab:pink'] filev = "f_intra_" data = "data_intra_" dataf = "dataf_intra_" for i, j in zip(versions, colors): filev_version = filev+i data_version = data+i dataf_version = dataf+i file_intra = open("output_intra_"+i+".dat", "r") data_intra = [] 
dataf_intra = [] for x in file_intra.readlines()[89::90]: data_intra.append(float(x)) file_intra.close() #print(data_intra) for y in data_intra: dataf_intra.append(round(y, 5)) print(i+": ",dataf_intra) running1_1 = ['1st', '2nd', '3rd', '4th', '5th'] plt.figure(figsize = (10, 5)) plt.bar(running1_1, dataf_intra, color =j, width = 0.4) plt.grid(axis='y') for index, datas in enumerate(dataf_intra): plt.text(x=index, y=datas, s=datas, ha = 'center', va = 'bottom', fontweight='bold') plt.xlabel("Running only with intra cache "+i, fontweight='bold') plt.ylabel("Time in seconds", fontweight='bold') plt.title("Chart "+i+" intra - Heat distribution - with intra cache, no inter cache - IntPy "+i+" version", fontweight='bold') plt.savefig("chart_intra_"+i+".png") plt.close() #plt.show() import matplotlib.pyplot as plt file_intra = open("stats_intra.dat", "r") data_intra = [] for x in file_intra.readlines()[5::8]: data_intra.append(round(float(x[8::]), 5)) file_intra.close() print(data_intra) versions = ["--no-cache", "0.1.x", "0.2.1.x", "0.2.2.x", "0.2.3.x", "0.2.4.x", "0.2.5.x", "0.2.6.x", "0.2.7.x"] #colors =['royalblue', 'forestgreen', 'orangered', 'purple', 'skyblue', 'lime', 'lightgrey', 'tan'] colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:purple', 'tab:grey', 'tab:olive', 'tab:cyan', 'tab:brown', 'tab:pink'] plt.figure(figsize = (10, 5)) plt.bar(versions, data_intra, color = colors, width = 0.7) plt.grid(axis='y') for index, datas in enumerate(data_intra): plt.text(x=index, y=datas, s=datas, ha = 'center', va = 'bottom', fontweight='bold') plt.xlabel("Median for 5 executions in each version, intra cache", fontweight='bold') plt.ylabel("Time in seconds", fontweight='bold') plt.title("Heat distribution, cache intra-running, comparison of all versions", fontweight='bold') plt.savefig('compare_median_intra.png') plt.close() #plt.show() ``` ##**1.2 Fast execution: full cache -> intra and inter-cache** ###**1.2.1 Fast execution: full cache -> intra and inter-cache => experiment's executions** ``` !rm -rf .intpy;\ echo "IntPy full cache -> intra and inter-cache";\ experimento=Digital_RADs.py;\ echo "Experiment: $experimento";\ for i in "--no-cache" "v01x" "v021x" "v022x" "v023x" "v024x" "v025x" "v026x" "v027x";\ do rm -rf output_full_$i.dat;\ rm -rf .intpy;\ echo "---------------------------------";\ echo "IntPy version $i";\ for j in {1..5};\ do echo "Execution $j";\ if [ "$i" = "--no-cache" ]; then python3.9 $experimento NC_005213.fasta out.fasta 1 GAATC 2 $i >> output_full_$i.dat;\ else python3.9 $experimento NC_005213.fasta out.fasta 1 GAATC 2 -v $i >> output_full_$i.dat;\ fi;\ echo "Done execution $j";\ done;\ echo "Done IntPy distribution version $i";\ done;\ #!ls -a #%cd Digital_RADs/ !ls -a !echo "Statistics evaluation:";\ rm -rf stats_full.dat;\ for k in "--no-cache" "v01x" "v021x" "v022x" "v023x" "v024x" "v025x" "v026x" "v027x";\ do echo "Statistics version $k" >> stats_full.dat;\ echo "Statistics version $k";\ python3.9 stats_colab_digi_rads.py output_full_$k.dat;\ python3.9 stats_colab_digi_rads.py output_full_$k.dat >> stats_full.dat;\ echo "---------------------------------";\ done;\ ``` ###**1.2.2 Fast execution: full cache -> intra and inter-cache => charts generation** ``` %matplotlib inline import matplotlib.pyplot as plt versions = ['--no-cache', 'v01x', 'v021x', 'v022x', 'v023x', 'v024x', 'v025x', 'v026x', 'v027x'] colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:purple', 'tab:grey', 'tab:olive', 'tab:cyan', 'tab:brown', 'tab:pink'] filev = "f_full_" data = 
"data_full_" dataf = "dataf_full_" for i, j in zip(versions, colors): filev_version = filev+i data_version = data+i dataf_version = dataf+i file_full = open("output_full_"+i+".dat", "r") data_full = [] dataf_full = [] for x in file_full.readlines()[89::90]: data_full.append(float(x)) file_full.close() for y in data_full: dataf_full.append(round(y, 5)) print(i+": ",dataf_full) running1_1 = ['1st', '2nd', '3rd', '4th', '5th'] plt.figure(figsize = (10, 5)) plt.bar(running1_1, dataf_full, color =j, width = 0.4) plt.grid(axis='y') for index, datas in enumerate(dataf_full): plt.text(x=index, y=datas, s=datas, ha = 'center', va = 'bottom', fontweight='bold') plt.xlabel("Running full cache "+i, fontweight='bold') plt.ylabel("Time in seconds", fontweight='bold') plt.title("Chart "+i+" full - Heat distribution - with intra and inter cache - IntPy "+i+" version", fontweight='bold') plt.savefig("chart_full_"+i+".png") plt.close() #plt.show() import matplotlib.pyplot as plt file_full = open("stats_full.dat", "r") data_full = [] for x in file_full.readlines()[5::8]: data_full.append(round(float(x[8::]), 5)) file_full.close() print(data_full) versions = ["--no-cache", "0.1.x", "0.2.1.x", "0.2.2.x", "0.2.3.x", "0.2.4.x", "0.2.5.x", "0.2.6.x", "0.2.7.x"] #colors =['royalblue', 'forestgreen', 'orangered', 'purple', 'skyblue', 'lime', 'lightgrey', 'tan'] colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:purple', 'tab:grey', 'tab:olive', 'tab:cyan', 'tab:brown', 'tab:pink'] plt.figure(figsize = (10, 5)) plt.bar(versions, data_full, color = colors, width = 0.7) plt.grid(axis='y') for index, datas in enumerate(data_full): plt.text(x=index, y=datas, s=datas, ha = 'center', va = 'bottom', fontweight='bold') plt.xlabel("Median for 5 executions in each version, full cache", fontweight='bold') plt.ylabel("Time in seconds", fontweight='bold') plt.title("Heat distribution, cache intra and inter-running, all versions", fontweight='bold') plt.savefig('compare_median_full.png') plt.close() #plt.show() ``` ##**1.3 Displaying charts to all versions** ###**1.3.1 Only intra-cache charts** ``` versions = ['--no-cache', 'v01x', 'v021x', 'v022x', 'v023x', 'v024x', 'v025x', 'v026x', 'v027x'] from IPython.display import Image, display for i in versions: display(Image("chart_intra_"+i+".png")) print("=====================================================================================") ``` ###**1.3.2 Full cache charts -> intra and inter-cache** ``` versions = ['--no-cache', 'v01x', 'v021x', 'v022x', 'v023x', 'v024x', 'v025x', 'v026x', 'v027x'] from IPython.display import Image, display for i in versions: display(Image("chart_full_"+i+".png")) print("=====================================================================================") ``` ###**1.3.3 Only intra-cache: median comparison chart of all versions** ``` from IPython.display import Image, display display(Image("compare_median_intra.png")) ``` ###**1.3.4 Full cache -> intra and inter-cache: median comparison chart of all versions** ``` from IPython.display import Image, display display(Image("compare_median_full.png")) ```
github_jupyter
<a href="https://colab.research.google.com/github/ghost331/Recurrent-Neural-Network/blob/main/Covid_19_Analysis_using_RNN_with_LSTM.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` #Data: https://github.com/CSSEGISandData/COVID-19/blob/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv import pandas as pd import numpy as np import matplotlib.pyplot as plt country = "India" #Total COVID confirmed cases df_confirmed = pd.read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv") df_confirmed_country = df_confirmed[df_confirmed["Country/Region"] == country] df_confirmed_country = pd.DataFrame(df_confirmed_country[df_confirmed_country.columns[4:]].sum(),columns=["confirmed"]) df_confirmed_country.index = pd.to_datetime(df_confirmed_country.index,format='%m/%d/%y') df_confirmed_country.plot(figsize=(10,5),title="COVID confirmed cases") df_confirmed_country.tail(10) print("Total days in the dataset", len(df_confirmed_country)) #Use data until 14 days before as training x = len(df_confirmed_country)-14 train=df_confirmed_country.iloc[300:x] test = df_confirmed_country.iloc[x:] ##scale or normalize data as the data is too skewed from sklearn.preprocessing import MinMaxScaler scaler = MinMaxScaler() scaler.fit(train) train_scaled = scaler.transform(train) test_scaled = scaler.transform(test) ## Use TimeSeriestrain_generator to generate data in sequences. #Alternatively we can create our own sequences. from keras.preprocessing.sequence import TimeseriesGenerator #Sequence size has an impact on prediction, especially since COVID is unpredictable! seq_size = 7 ## number of steps (lookback) n_features = 1 ## number of features. 
This dataset is univariate so it is 1 train_generator = TimeseriesGenerator(train_scaled, train_scaled, length = seq_size, batch_size=1) print("Total number of samples in the original training data = ", len(train)) # 660 print("Total number of samples in the generated data = ", len(train_generator)) #653 with seq_size=7 #Check data shape from generator x,y = train_generator[10] #Check train_generator #Takes 7 days as x and 8th day as y (for seq_size=7) #Also generate test data test_generator = TimeseriesGenerator(test_scaled, test_scaled, length=seq_size, batch_size=1) print("Total number of samples in the original training data = ", len(test)) # 14 as we're using last 14 days for test print("Total number of samples in the generated data = ", len(test_generator)) # 7 #Check data shape from generator x,y = test_generator[0] from keras.models import Sequential from keras.layers import Dense, LSTM, Dropout, Activation #Define Model model = Sequential() model.add(LSTM(128, activation='relu', return_sequences=True, input_shape=(seq_size, n_features))) model.add(LSTM(64, activation='relu')) model.add(Dense(32)) model.add(Dense(1)) model.compile(optimizer='adam', loss='mean_squared_error') model.summary() print('Train...') history = model.fit_generator(train_generator, validation_data=test_generator, epochs=30, steps_per_epoch=10) #plot the training and validation accuracy and loss at each epoch loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(1, len(loss) + 1) plt.plot(epochs, loss, 'y', label='Training loss') plt.plot(epochs, val_loss, 'r', label='Validation loss') plt.title('Training and validation loss') plt.xlabel('Epochs') plt.ylabel('Loss') plt.legend() plt.show() #forecast prediction = [] #Empty list to populate later with predictions current_batch = train_scaled[-seq_size:] #Final data points in train current_batch = current_batch.reshape(1, seq_size, n_features) #Reshape ## Predict future, beyond test dates future = 7 #Days for i in range(len(test) + future): current_pred = model.predict(current_batch)[0] prediction.append(current_pred) current_batch = np.append(current_batch[:,1:,:],[[current_pred]],axis=1) ### Inverse transform to before scaling so we get actual numbers rescaled_prediction = scaler.inverse_transform(prediction) time_series_array = test.index #Get dates for test data #Add new dates for the forecast period for k in range(0, future): time_series_array = time_series_array.append(time_series_array[-1:] + pd.DateOffset(1)) #Create a dataframe to capture the forecast data df_forecast = pd.DataFrame(columns=["actual_confirmed","predicted"], index=time_series_array) df_forecast.loc[:,"predicted"] = rescaled_prediction[:,0] df_forecast.loc[:,"actual_confirmed"] = test["confirmed"] #Plot df_forecast.plot(title="Predictions for next 7 days") ```
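As a small illustrative check (not part of the original notebook), an error metric can be computed over the days where both actual and predicted counts exist, i.e. the 14-day test window; the 7 forecast-only days have no actuals and are dropped.

```
# Illustrative only: mean absolute percentage error over the test window.
overlap = df_forecast.dropna(subset=["actual_confirmed"])
mape = np.mean(
    np.abs(overlap["actual_confirmed"] - overlap["predicted"]) / overlap["actual_confirmed"]
) * 100
print("MAPE over the 14-day test window: %.2f%%" % mape)
```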
github_jupyter
### Installation ``` pip install -q tensorflow tensorflow-datasets ``` #### Imports ``` import tensorflow as tf import matplotlib.pyplot as plt import numpy as np from tensorflow import keras import tensorflow_datasets as tfds ``` ### Checking datasets ``` print(tfds.list_builders()) ``` ### Getting data Infomation ``` builder = tfds.builder('rock_paper_scissors') info = builder.info print(info) ``` ### Data Preparation ``` train = tfds.load(name='rock_paper_scissors', split="train") test = tfds.load(name='rock_paper_scissors', split='test') ``` ### Iterating over data > To iterate over a tensorflow dataset we do it as follows ``` for data in train: print(data['image'], data['label']) break ``` ### Creating a Numpy data > We are going to scale our data and convert it to a nummpy array ``` train_images = np.array([data['image'].numpy()/255 for data in train]) train_labels =np.array([data['label'].numpy() for data in train]) test_image = np.array([data['image'].numpy()/255 for data in test]) test_labels = np.array([data['label'].numpy() for data in test]) train_images[0] ``` ### Class Names 0 - Rock 1 - Paper 2 - Scissors ``` class_names = np.array(["rock", "paper", "scissor"]) ``` ### Creating a NN ``` input_shape = train_images[0].shape input_shape model = keras.Sequential([ keras.layers.Conv2D(32, (3, 3), input_shape=input_shape, activation='relu'), keras.layers.MaxPool2D((3,3)) , keras.layers.Conv2D(64, (2, 2), activation='relu'), keras.layers.MaxPool2D((2,2)), keras.layers.Conv2D(64, (2, 2), activation='relu'), keras.layers.MaxPool2D((2,2)), keras.layers.Flatten(), keras.layers.Dense(64, activation='relu'), keras.layers.Dense(32, activation='relu'), keras.layers.Dense(3, activation='softmax') ]) model.summary() ``` ### Combiling the Model ``` model.compile( optimizer = keras.optimizers.Adam(learning_rate=.0001), metrics=["accuracy"], loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True) ) ``` ### Fitting the ModeL ``` EPOCHS = 5 BATCH_SIZE = 4 VALIDATION_SET = (test_image, test_labels) history = model.fit(train_images, train_labels, epochs=EPOCHS, validation_data=VALIDATION_SET, batch_size=BATCH_SIZE) ``` ### Model Evaluation Conclusion Our model is performing perfect. The loss on the train_set is almost 0 as well as the validation loss. The accuracy on the train set is `100%` compared to `83%` accuracy on the test set. > The model is just overtraining but giving us good results on the validation set. 
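To make the numbers quoted above easy to reproduce, here is a small illustrative check (an addition, not in the original notebook) that evaluates the trained model on both splits explicitly:

```
# Illustrative only: re-check train vs. test accuracy of the trained model.
train_loss, train_acc = model.evaluate(train_images, train_labels, verbose=0)
test_loss, test_acc = model.evaluate(test_image, test_labels, verbose=0)
print("train accuracy: %.3f  test accuracy: %.3f" % (train_acc, test_acc))
```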
### Making Predictions
```
predictions = model.predict(test_image[:10])

for i, j in zip(predictions, test_labels[:10]):
  print(class_names[np.argmax(i)], "-------->", class_names[j])
```
### Tuning Hyperparameters -- Keras Tuner
* [Docs](https://www.tensorflow.org/tutorials/keras/keras_tuner)

### Installation
```
pip install -q -U keras-tuner
```
### Importing
```
import kerastuner as kt

def model_builder(hp):
  model = keras.Sequential()
  # we want the model to find the best number of filters and the activation
  # function for the first layer for us ('sgd' is an optimizer, not an
  # activation, so 'tanh' is used as the second choice here)
  model.add(keras.layers.Conv2D(hp.Int('units', min_value=32, max_value=512, step=32), (3, 3),
                                input_shape=input_shape,
                                activation=hp.Choice('activation-fn', values=['relu', 'tanh'])))
  model.add(keras.layers.MaxPool2D((3,3)))
  model.add(keras.layers.Conv2D(64, (2, 2), activation='relu'))
  model.add(keras.layers.MaxPool2D((2,2)))
  model.add(keras.layers.Conv2D(64, (2, 2), activation='relu'))
  model.add(keras.layers.MaxPool2D((2,2)))
  model.add(keras.layers.Flatten())
  model.add(keras.layers.Dense(64, activation='relu'))
  model.add(keras.layers.Dense(32, activation='relu'))
  model.add(keras.layers.Dense(3, activation='softmax'))

  model.compile(optimizer=keras.optimizers.Adam(learning_rate=hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])),
                # softmax output, so the loss receives probabilities rather than logits
                loss=keras.losses.SparseCategoricalCrossentropy(from_logits=False),
                metrics=['accuracy'])
  return model

tuner = kt.Hyperband(model_builder,
                     objective='val_accuracy',
                     max_epochs=10)

tuner.search(train_images, train_labels, validation_data=VALIDATION_SET,
             epochs=EPOCHS, batch_size=BATCH_SIZE)
```
> That's basically how `keras-tuner` works
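After the search finishes, the tuner can report the winning hyperparameters and rebuild a model with them. This sketch follows the keras-tuner tutorial linked above; the hyperparameter names match the ones defined in `model_builder`.
```
best_hps = tuner.get_best_hyperparameters(num_trials=1)[0]
print("Best number of first-layer filters:", best_hps.get('units'))
print("Best activation:", best_hps.get('activation-fn'))
print("Best learning rate:", best_hps.get('learning_rate'))

# rebuild and retrain a model with the best hyperparameters found
best_model = tuner.hypermodel.build(best_hps)
best_model.fit(train_images, train_labels, epochs=EPOCHS,
               validation_data=VALIDATION_SET, batch_size=BATCH_SIZE)
```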
github_jupyter
### Test spatial distribution of molecular clusters:

1) To determine the spatial distribution of molecular cell types (i.e., whether they are clustered, dispersed or uniformly distributed), we compared the cell types with a CSR (complete spatial randomness) process and performed a Monte Carlo test of CSR (Cressie; Waller). We simulated the CSR process by randomly sampling cells in the data 1,000 times to generate a distribution of the average distance to the nearest neighbor under CSR (ANN_CSR). The number of randomly sampled cells was matched to that of each molecular cell type. The ANN of each molecular cell type (ANN_Mol) was calculated and compared to the CSR distribution to calculate the p-value.

2) To determine whether the molecular cell types are enriched within proposed subregions, we used an approach similar to the quadrat statistic (Cressie; Waller); instead of quadrats, the proposed anatomical parcellations were used for this analysis. One hypothesis was that the unequal distributions of molecular types within the proposed LHA subdomains are due to differences in cell/point densities in these subregions. To test this, we simulated the distribution by shuffling the neurons' molecular identities 1,000 times to compute the distribution of the χ² statistic for each cell type. The χ² statistic from the observed molecular cell types was compared to the distribution of expected χ² statistics under the above hypothesis to calculate the p-values.

3) To determine which subregion a given molecular cluster is enriched in, we performed a permutation test in which we shuffled the positions of neurons from each molecular type 1,000 times and calculated the distribution of regional enrichment for any given molecular cell type. The observed fraction of neurons enriched in a given subregion for each molecular cell type was compared to the expected distribution from the random process to calculate the p-values.
``` import os, sys import numpy as np import pandas as pd from glob import glob from skimage.io import imread, imsave from os.path import abspath, dirname import matplotlib.pyplot as plt import matplotlib matplotlib.style.use('default') from scipy import stats, spatial import seaborn as sns from scipy.stats import kde, pearsonr from sklearn.utils import shuffle #import scanpy as sc lha_neuron=pd.read_csv('directory/spotcount/neuron',sep=',', index_col=0) ex_m=pd.read_csv('/Slc17a6/molecular/type/metadata',sep=',', index_col=0) inh_m=pd.read_csv('/Slc32a1/molecular/type/metadata',sep=',', index_col=0) lha_neuron=lha_neuron.T lha_neuron=lha_neuron.where(lha_neuron>=0, 0) roi=pd.read_csv('directory/roi/metadata',sep=',', index_col=0) cluster=pd.concat([ex_m,inh_m],axis=0) c=['Ex-1', 'Ex-2', 'Ex-3', 'Ex-4', 'Ex-5', 'Ex-6', 'Ex-7', 'Ex-8', 'Ex-9', 'Ex-10', 'Ex-11', 'Ex-12', 'Ex-13', 'Ex-14', 'Ex-15', 'Ex-16', 'Ex-17', 'Ex-18', 'Ex-19', 'Ex-20', 'Ex-21', 'Ex-22', 'Ex-23', 'Ex-24', 'Ex-25','Inh-1', 'Inh-2','Inh-3', 'Inh-4', 'Inh-5', 'Inh-6', 'Inh-7', 'Inh-8', 'Inh-9', 'Inh-10', 'Inh-11', 'Inh-12', 'Inh-13', 'Inh-14', 'Inh-15', 'Inh-16', 'Inh-17', 'Inh-18', 'Inh-19', 'Inh-20', 'Inh-21', 'Inh-22', 'Inh-23'] ``` ###### Generate random distribution and compute ANN ``` distrib=pd.DataFrame(np.empty([len(c),1000]),index=c,columns=range(1,1001)) for n in c: for i in range(1,1001): idx=np.random.choice(roi.index,df.loc[n,'size'].astype('int')) X=roi[roi.index.isin(idx)] dist,r=spatial.KDTree(X.to_numpy()[:,:3]).query(X.to_numpy()[:,:3], k=2) distrib.loc[n,i]=np.mean(dist[dist!=0]) matrix=pd.DataFrame(np.empty([len(c),0]),index=c) for n in c: C=roi[roi.index.isin(cluster[cluster.x==n].index)].to_numpy()[:,:3] dist,r=spatial.KDTree(C).query(C, k=2) matrix.loc[n,'ANN']=np.mean(dist[dist!=0]) csr_test=pd.DataFrame(np.empty([len(c),0]),index=c) csr_test.loc[j,'p_value']=-1 csr_test.loc[j,'diff']=-1 for j in c: d=distrib.loc[j].to_numpy() a=len(d[d<=matrix.loc[j,'ANN']]) # b=1001-a csr_test.loc[j,'p_value']=a/1001 csr_test.loc[j,'diff']=matrix.loc[j,'ANN']-distrib.loc[j].min() ``` ###### χ 2 test ``` img=imread('LHA/parcellation/mask') A=roi.copy() A=A[(A.x<777)&(A.y<772)&(A.z<266)] # roi.loc[:,'subregion']=0 lb=np.unique(img[img!=0]) df_q=pd.DataFrame(np.zeros([len(c),len(lb)]),index=c,columns=lb) for j in c: C=A[A.index.isin(cluster[cluster.x==j].index)] for x in C.index: coord=np.array(np.floor(C.loc[x].to_numpy()[:3])-1) C.loc[x,'subregion']=img[tuple(coord)] roi.loc[x,'subregion']=img[tuple(coord)] if len(C)>0: for y in lb: df_q.loc[j,y]=len(C[C['subregion']==y]) ``` ###### Shuffle data and compare spatial distribution within LHA parcellations ``` from sklearn.utils import shuffle a={} for j in c: shuffle_s=pd.DataFrame(np.zeros([1000,len(lb)]),columns=lb) for ind in range (0,1000): roi_s=shuffle(roi.subregion.to_numpy()) roi_shuffle=roi.copy() roi_shuffle['subregion']=roi_s X=roi_shuffle[roi_shuffle.index.isin(cluster[cluster.x==j].index)] if len(X)>0: for y in lb: shuffle_s.loc[ind,y]=len(X[X['subregion']==y]) ind+=1 a[j]=shuffle_s for j in c: a[j]=a[j].rename(columns={1.0: "LHAd-db",3.0: "LHAdl",4.0: "LHAs-db",5.0: "ZI", 6.0: "EP", 7.0: "fornix",9.0: "LHA-vl",11.0:"LHAf",17.0:"LHAhcrt-db"}) a[j]=a[j][['ZI', 'LHAd-db','LHAhcrt-db','LHAdl','LHAf','fornix', 'LHAs-db','LHA-vl','EP']] a[j]=a[j].drop(columns='fornix') a[j]=a[j].rename(columns={"LHA-vl":"LHAf-l"}) chi_square_shuffle=pd.DataFrame(np.zeros([len(c),1000]),index=c) for i in c: for ind in range(0,1000): 
chi_square_shuffle.loc[i,ind]=stats.mstats.chisquare(a[i].loc[ind,:])[0] for i in c: d=stats.chisquare(df_q.loc[i,:])[0] chi_square.loc[i,'r_pval']=len(np.where(chi_square_shuffle.loc[i,:]>d)[0])/1000 ``` ###### permutation (shuffle) test to determine which LHA subregion molecular cell types are enriched in ``` A=roi.copy() A=A[(A.x<777)&(A.y<772)&(A.z<266)] roi.loc[:,'subregion']=0 lb=np.unique(img[img!=0]) df=pd.DataFrame(np.zeros([len(c),len(lb)]),index=c,columns=lb) for j in c: C=A[A.index.isin(cluster[cluster.x==j].index)] for x in C.index: coord=np.array(np.floor(C.loc[x].to_numpy()[:3])-1) C.loc[x,'subregion']=img[tuple(coord)] roi.loc[x,'subregion']=img[tuple(coord)] if len(C)>0: for y in lb: df.loc[j,y]=len(C[C.subregion==y])/len(C) df=df.rename(columns={1.0: "LHAd-db",3.0: "LHAdl",4.0: "LHAs-db",5.0: "ZI", 6.0: "EP", 7.0: "fornix",9.0: "LHAf-l",11.0:"LHAf",17.0:"LHAhcrt-db"}) df=df[['ZI', 'LHAd-db','LHAhcrt-db','LHAdl','LHAf','fornix', 'LHAs-db','LHAf-l','EP']] A=roi.copy() A=A[(A.x<777)&(A.y<772)&(A.z<266)] A.loc[:,'shuffle']=1 for i in c: m[i]=np.zeros([len(lb)+1,1]) for ind in range(1,1001): cluster_shuffle=cluster.copy() cluster_shuffle.x=shuffle(cluster.x.to_numpy()) for i in c: ct=A[A.index.isin(cluster_shuffle[cluster_shuffle.x==i].index)] x=pd.DataFrame(data=np.zeros([len(lb)+1,1]),index=[0,1,3,4,5,6,7,9,11,17], columns=['shuffle']) y=ct.groupby('subregion').sum() for j in y.index: x.loc[j,'shuffle']=y.loc[j,'shuffle']/len(ct) m[i]=np.append(m[i],x.to_numpy().reshape(10,1),axis=1) df_p=pd.DataFrame(data=np.ones(df.shape),index=df.index,columns=df.columns) df_p.shape ind=0 for i in c: print(i) for n in range(0,9): print(n,ind) df_p.iloc[ind,n]=len(np.where(m[i][n,1:]>df.iloc[ind,n])[0])/1000 ind+=1 df_p=df_p.reindex(df_p.index[a.dendrogram_col.reordered_ind]) df_p=df_p[df_p.columns[::-1]] df_p=df_p.drop(columns='fornix') ```
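Note: the reindexing step above uses `a.dendrogram_col.reordered_ind`, i.e. the column ordering of a seaborn clustermap that is not defined in this excerpt (and whose name collides with the `a` dictionary of shuffled counts). A minimal sketch of how such an ordering could be produced from the enrichment table `df` is shown below; the variable names and styling here are assumptions.
```
# cluster the enrichment fractions so that similar molecular cell types end up adjacent
cluster_map = sns.clustermap(df.T, cmap='viridis', figsize=(14, 6))

# the column ordering of this clustermap is what the reindexing step above relies on
ordered_types = df_p.index[cluster_map.dendrogram_col.reordered_ind]
df_p_ordered = df_p.reindex(ordered_types)
```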
github_jupyter
``` %pylab inline #%matplotlib qt from __future__ import division # use so 1/2 = 0.5, etc. import sk_dsp_comm.sigsys as ss import sk_dsp_comm.iir_design_helper as iir_d import sk_dsp_comm.pyaudio_helper as pah import scipy.signal as signal import time import sys import imp # for module development and reload() from IPython.display import Audio, display from IPython.display import Image, SVG pylab.rcParams['savefig.dpi'] = 100 # default 72 #pylab.rcParams['figure.figsize'] = (6.0, 4.0) # default (6,4) #%config InlineBackend.figure_formats=['png'] # default for inline viewing %config InlineBackend.figure_formats=['svg'] # SVG inline viewing #%config InlineBackend.figure_formats=['pdf'] # render pdf figs for LaTeX #Image('filename.png',width='80%') ``` # Static/Simulation-Based Audio Processing ## Notch Filters to Remove Interference Within `sigsys` there are some handy functions for designing single section notch filters and then cascading them. First set up a scenario with tone interference present in speech. ``` fs,s = ss.from_wav('OSR_us_000_0030_8k.wav') soi = s[10000:40000] n = arange(len(soi)) snoi = 0.4*cos(2*pi*1000/fs*n) + 0.5*cos(2*pi*1500/fs*n) r = soi + snoi psd(r,2**10,8000); title(r'Two Interfering Tones'); ``` Look at the waveform and then listen to it. ``` # First save r to a wave file for static playback ss.to_wav('speech_tone.wav',8000,r/2) Audio('speech_tone.wav') ``` Design a cascade of notch filters: ``` plot(r[6000:10000]) title(r'The Interference is Overwhelming') bn1, an1 = ss.fir_iir_notch(1000,8000) bn2, an2 = ss.fir_iir_notch(1500,8000,.98) # tighten the bandwidth of the 12k notch bn, an = ss.cascade_filters(bn1,an1,bn2,an2) iir_d.freqz_resp_list([bn],[an],'dB',8000) grid(); ``` Now apply the filter to the composite signal:: ``` z = signal.lfilter(bn,an,r) specgram(z,512,8000); # First save z to a wave file for static playback ss.to_wav('speech_tone_notch.wav',8000,z) Audio('speech_tone_notch.wav') ``` ## Adaptive Interference Removal (Placeholder, but start from `ss.lms_ic`) ```python def lms_ic(r,M,mu,delta=1): """ Least mean square (LMS) interference canceller adaptive filter. A complete LMS adaptive filter simulation function for the case of interference cancellation. Used in the digital filtering case study. Parameters ---------- M : FIR Filter length (order M-1) delta : Delay used to generate the reference signal mu : LMS step-size delta : decorrelation delay between input and FIR filter input Returns ------- n : ndarray Index vector r : ndarray noisy (with interference) input signal r_hat : ndarray filtered output (NB_hat[n]) e : ndarray error sequence (WB_hat[n]) ao : ndarray final value of weight vector F : ndarray frequency response axis vector Ao : ndarray frequency response of filter Examples ---------- >>> # import a speech signal >>> fs,s = from_wav('OSR_us_000_0030_8k.wav') >>> # add interference at 1kHz and 1.5 kHz and >>> # truncate to 5 seconds >>> r = soi_snoi_gen(s,10,5*8000,[1000, 1500]) >>> # simulate with a 64 tap FIR and mu = 0.005 >>> n,r,r_hat,e,ao,F,Ao = lms_ic(r,64,0.005) """ ``` # Audio Special Effects Consider *flanging*, which is a combination of a direct signal path and a time varying delay path. In `digitalcom` there is a function for creating a time-varying delay, $\beta[n]$, that works well for communication systems impairments, but is useful here as well. Understand that a time varying delay is compressing and expanding the time axis just like the Doppler effect. 
In music this causes the pitch to wobble, but at slow rates introduces a whooshing effect, made popular in rock music many years ago. ``` Image('images/Flanging_Block.png',width='80%') ``` The time varying delay in flanging takes the form: $$ \beta[n] = D_p \big(1+\cos(2\pi f_0/f_s)\big) $$ where here $D_p = 50$ and $f_0 \simeq 1$ Hz or less. Import some sound files and set up the time varying delay. ``` import sk_dsp_comm.digitalcom as dc # for time delay Audio('c_major.wav') fs,s12 = ss.from_wav('Music_Test.wav') fs # Processing is slow because the time axis interpolation is subsample-based # Using a 3rd-order Lagrange interpolator fs,s1 = ss.from_wav('c_major.wav') #fs,s12 = ss.from_wav('Music_Test.wav') #s1 = (s12[:,0] + s12[:,0])/2 n = arange(len(s1)) f0 = 1 Dp = 50 D = Dp*(1 + cos(2*pi*f0/fs*n)) x = dc.time_delay(s1,D + 2, 2*Dp+4) x_wav = x # for PyAudio playback # Flanged versions of c_major.wav and Music_Test.wav #ss.to_wav('flanger_audio_c_major.wav',44100,x_wav) ss.to_wav('flanger_audio_Music_Test.wav',44100,x_wav) ``` #### Playback using PyAudio (one channel only) ``` pah.available_devices() # define callback # Here we configure the callback to play back a wav file def callback(in_data, frame_count, time_info, status): # Ignore in_data when generating output only #*********************************************** global x # Note wav is scaled to [-1,1] so need to rescale to int16 y = 32767*x.get_samples(frame_count) # Save data for later analysis # accumulate a new frame of samples DSP_IO.DSP_capture_add_samples(y) #*********************************************** # Convert from float back to int16 y = y.astype(int16) return y.tobytes(), pah.pyaudio.paContinue x = pah.loop_audio(x_wav) DSP_IO = pah.DSP_io_stream(callback,0,1,fs=44100,Tcapture=2) DSP_IO.stream(20) # may need to change time ```
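The adaptive interference removal section above was left as a placeholder, but the `ss.lms_ic` docstring already shows the intended call. A minimal sketch applying it to the tone-corrupted speech signal `r` from the notch-filter section; the filter length and step size follow the docstring example, and the output scaling is an assumption.
```
# LMS interference canceller: 64-tap FIR, mu = 0.005, as in the docstring example
n, r_in, r_hat, e, ao, F, Ao = ss.lms_ic(r, 64, 0.005)

# e[n] is the wideband (speech) estimate with the narrowband tones adaptively removed
specgram(e, 512, 8000);

ss.to_wav('speech_tone_lms.wav', 8000, e/max(abs(e)))
Audio('speech_tone_lms.wav')
```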
github_jupyter
<a href="https://colab.research.google.com/github/skimotv/SkimoTextSummarizer/blob/master/Multiple_Summarizer_Tests.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` !pip install -U git+https://github.com/huggingface/transformers.git import torch from transformers import AutoTokenizer, AutoModelWithLMHead # Use a model from https://huggingface.co/models?filter=summarization&sort=modified models = ["sshleifer/distilbart-cnn-12-3", "sshleifer/distilbart-cnn-12-6", "sshleifer/distilbart-cnn-6-6", "sshleifer/distilbart-xsum-1-1", "sshleifer/distilbart-xsum-12-1", "sshleifer/distilbart-xsum-12-3", "sshleifer/distilbart-xsum-12-6", "sshleifer/distilbart-xsum-6-6", "sshleifer/distilbart-xsum-9-6", "google/pegasus-billsum" ] tokenizer = AutoTokenizer.from_pretrained(models[9]) model = AutoModelWithLMHead.from_pretrained(models[9]) text = "Four overnight camps in Maine successfully identified and isolated three Covid-19 positive people with no symptoms, preventing transmission to more than 1,000 other campers and staff this summer, says a new report published by the Centers for Disease Control and Prevention.For many kids, summer camp looked and felt a little different this year. There were daily temperatures checks, more time spent outside and plenty of face masks. Dr. Laura Blaisdell of the Maine Medical Center Research Institute and colleagues said the extra effort paid off.They detailed where these camps went right in a report examining 642 children and 380 staff members who attended the four camps in Maine for well over a month between June and August. Camp attendees traveled from across the United States and six international locations: Bermuda, Canada, Mexico, South Africa, Spain and the United Kingdom. They quarantined for up to 14 days before arriving at camp and three of the sites asked campers to submit Covid-19 test results before attending. This was an important step in preventing introduction of the virus in a setting with many young adults who could be asymptomatic or presymptomatic, Blaisdell and colleagues wrote in the CDC's weekly report.Camp attendees were separated into groups when they first arrived and had to wear face coverings when interacting with people outside of their groups. The camps kept surfaces clean and groups physically distant. They staggered bathroom use and dining times. They also screened campers daily for fever and coronavirus symptoms. Most attendees were tested again for Covid-19 a few days after arriving at camp. That's when a symptomless camper and two staff members tested positive, according to the report. They were rapidly isolated until they recovered, and their contacts were quarantined for 14 days. (CNN)Four overnight camps in Maine successfully identified and isolated three Covid-19 positive people with no symptoms, preventing transmission to more than 1,000 other campers and staff this summer, says a new report published by the Centers for Disease Control and Prevention.For many kids, summer camp looked and felt a little different this year. There were daily temperatures checks, more time spent outside and plenty of face masks. Dr. 
Laura Blaisdell of the Maine Medical Center Research Institute and colleagues said the extra effort paid off.They detailed where these camps went right in a report examining 642 children and 380 staff members who attended the four camps in Maine for well over a month between June and August.A Georgia sleepaway camp&#39;s coronavirus outbreak is a warning for what could happen when schools reopen, CDC saysA Georgia sleepaway camp's coronavirus outbreak is a warning for what could happen when schools reopen, CDC saysCamp attendees traveled from across the United States and six international locations: Bermuda, Canada, Mexico, South Africa, Spain and the United Kingdom. They quarantined for up to 14 days before arriving at camp and three of the sites asked campers to submit Covid-19 test results before attending.Content by CNN UnderscoredHow to sell your old tech before it loses its value.CNN Underscored partnered with Decluttr to create this content. When you make a purchase, CNN receives revenue.This was an important step in preventing introduction of the virus in a setting with many young adults who could be asymptomatic or presymptomatic, Blaisdell and colleagues wrote in the CDC's weekly report.Camp attendees were separated into groups when they first arrived and had to wear face coverings when interacting with people outside of their groups. The camps kept surfaces clean and groups physically distant. They staggered bathroom use and dining times. They also screened campers daily for fever and coronavirus symptoms.Covid-19 child cases in the US have increased by 21% since early August, new data showsCovid-19 child cases in the US have increased by 21% since early August, new data showsMost attendees were tested again for Covid-19 a few days after arriving at camp. That's when a symptomless camper and two staff members tested positive, according to the report. They were rapidly isolated until they recovered, and their contacts were quarantined for 14 days.None of the contacts tested positive for Covid-19, according to the CDC report.The report noted that it wasn't one particular precaution that helped prevent the spread of coronavirus in these camps, but rather a multilayered strategy that was carefully executed." print(len(text)) def summarize(text): inputs = tokenizer.encode(text, add_special_tokens=False, return_tensors="pt") outputs = model.generate(inputs, max_length=250, do_sample=True, top_p=0.95, top_k=60) generated = tokenizer.decode(outputs[0]) return generated sum = summarize(text) print(sum) print(sum) print(len(sum)) ```
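The same input can be run through several of the checkpoints listed in `models` to compare their behaviour. This is a minimal sketch; the two indices picked here are arbitrary, and each `from_pretrained` call downloads a full model, so it is slow.
```
for name in [models[1], models[6]]:
    tok = AutoTokenizer.from_pretrained(name)
    mdl = AutoModelWithLMHead.from_pretrained(name)
    inputs = tok.encode(text, add_special_tokens=False, return_tensors="pt")
    outputs = mdl.generate(inputs, max_length=250, do_sample=True, top_p=0.95, top_k=60)
    print(name, "->", tok.decode(outputs[0])[:300])
    print()
```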
github_jupyter
``` import numpy as np import astropy.units as u from astropy.time import Time from astropy.table import Table from sbpy.data import Ephem, Phys from sbpy.activity import (Haser, LTE, NonLTE, photo_timescale, einstein_coeff, intensity_conversion, beta_factor, total_number, from_Haser) import matplotlib.pyplot as plt import matplotlib import astropy.constants as con ``` Calculating Column Density using the NonLTE iterative code `from_pyradex` ======================================= _____________________________________________________________________________________________ `sbpy.activity` offers an implementation of `pyradex` which is a python wrapper for the NonLTE, iterative fortran code called RADEX. Radex is described in [van der Tak et al. 2013](https://ui.adsabs.harvard.edu/abs/2007A%26A...468..627V/abstract). This model takes in an initial guess for the column density and compares the data iteratively against RADEX results, finding the best fit column density for the data. More information about the installation and setup of `pyradex` can be found here [here](https://github.com/keflavich/pyradex), as well as more information about the parameters needed for the [RADEX](https://personal.sron.nl/~vdtak/radex/index.shtml) code. `from_pyradex` returns the best fitting column density based on the data provided which can then be used with the Haser model shown in the previous example. RADEX requires information on collider densities in order to determine collision rates. The collider densities are required to be given in the form of a dictionary. For comets, we expect H2O to be our main collisional partner but RADEX does not contain information on collisional rates for H2O. Therefore, the default value for collider densities in sbpy is a scaled version of the H2 collisional rate to account for H2O. This scaling is prominent in a lot of literature, such as [Walker et al. 2014](https://ui.adsabs.harvard.edu/abs/2014ApJ...790...96W/abstract), [de Val Borro et al. 2017](https://ui.adsabs.harvard.edu/abs/2018MNRAS.474.1099D/abstract), and [Schoier et al. 2004](https://ui.adsabs.harvard.edu/abs/2005A%26A...432..369S/abstract). In the case of this module we have chosen to follow the Walker et al. scaling for deriving H2O-H2O collision rates from H2-H2O coefficients. Within this scaling, we apply the square root of the ratio of reduced masses: $$s = (\frac{m_{H2O}}{m_{H2}})^{0.5}$$ Where `s` is the scale to multiply the collisional density of H2 against in order to obtain H2O-H2O collision rates. For the implementation of this code, the user can either define their chosen first guess for column density, or they can calculate it from their data or JPLSpec data `cdensity_Bockelee`. The literature used for this example is the same as in [this notebook](LTE_prodrate_Haser), [Wierzchos et al. 2018](https://ui.adsabs.harvard.edu/abs/2018AJ....156...34W/abstract): **Important notes:** `pyradex` requires a fortran compiler to be installed in your system. The recommendation is gfortran, which can be installed using [Homebrew](https://brewformulas.org/Gfortran), or any other similar service. Warnings of a missing file and RunTime error are normal from pyradex, if the user wants to find out more about them, see the [`pyradex` docs](https://github.com/keflavich/pyradex). The file error comes from the fact that `sbpy` uses, like pyradex, `astroquery.Lamda` for the molecular data files instead of searching them locally (despite the Fortran code still forcing the search for a local molecular data file). 
In the future, a function will be added to `sbpy` in which a user may build their own molecular data files from JPLSpec information. For now, LAMDA catalog is the primary source of the molecular data file for the implementation of RADEX. ``` co = Table.read(('data/CO.csv'), format="ascii.csv") error = np.array([0.2, 0.4, 0.4, 0.4, 0.4]) * 10.**28 # +/- error from literature Q_error = np.array(co['Q']) + np.array(error) # upper error limit Q_error = np.log10(np.array(Q_error)) - np.array(co['log(Q)']) co['Q_error'] = Q_error print("Table:\n{}\nColumn Names:\n{}".format(co, co.columns)) ``` Model parameters needed, all values are taken directly from the literature. In this example the molecule identifier will be inputted as a regular expression. Regular expressions for mol_tag can be used but the user must be careful not to provide an ambiguous regular expression. One good thing to remember is that anything between symbols '^' and '\\$' will be matched exactly, therefore you can avoid an ambiguity error by writing your molecule name as such: '^name\\$'. A perfect example of this is with the molecule in this example 'CO', simply writing mol_tag = 'CO' will produce an ambiguity error because it will match CO, CO2, etc. therefore, it is necessary to restrict our molecule name regex to '^CO\\$' as presented below. ``` transition_freq = (230.538 * u.GHz).to('MHz') aper = 10 * u.m # aperture mol_tag = '^CO$' # regex molecule identifier temp_estimate = 25. * u.K vgas = 0.5 * u.km / u.s target = 'C/2016 R2' b = 0.74 # intrinsic antenna value ``` Obtaining molecular data from the JPL Molecular Spectroscopy Catalog using `sbpy.data.phys`. See documentation for a detailed breakdown of the resulting object and the values stored in the object. ``` mol_data = Phys.from_jplspec(temp_estimate, transition_freq, mol_tag) # molecular data from JPLSpec intl = intensity_conversion(mol_data) # calculate line intensity mol_data.apply([intl.value] * intl.unit, name='Integrated line intensity at desired temp') # store value ``` Obtaining the Einstein Coefficient. In this example, we will obtain our Einstein Coefficient from LAMDA catalog and append it to our molecular data Phys object. In [this notebook](LTE_prodrate_without_photolysis.ipynb) we have been calculating it through sbpy/JPLSpec. It is possible that your transition frequency values may not exactly match the LAMDA catalog to the 4th significant figure, especially if you're using JPLSpec. Therefore, we recommend when using this method that you match your transition frequency with the LAMDA value over the JPLSpec value, since `from_jplspec` is designed to pick the closest transition frequency within a range of 1 GHz, whereas LAMDA will expect the exact value found in their catalog. ``` from astroquery.lamda import Lamda mol_name = 'CO' # LAMDA molecule name lam_search = Lamda.query(mol=mol_name.lower()) # LAMDA Query lam_result = lam_search[1] # outputs CO table lam_found = lam_result[lam_result['Frequency'] == transition_freq.to('GHz').value] # parse results at frequency au_cat = lam_found['EinsteinA'] # get Einstein Coefficient au_cat = au_cat.data[0] # get value of coefficient au = au_cat / u.s # define the unit mol_data.apply([au.value] * au.unit, name='eincoeff') # store einstein coefficient ``` Initialize the `sbpy.activity.Haser` model in order to perform our production rate calculations. 
`Q_estimate` first guess for the production rate was obtained running `from_Drahus` for the same data set before doing this example ``` Q_estimate = 3.594*10**(28) / u.s parent = photo_timescale('CO') * vgas # parent photodissociation rate coma = Haser(Q_estimate, vgas, parent) # initializing the model with an estimate ``` Run the `from_pyradex` iterative code on the data to find best fit column densities, and then calculate total number based on telescope geometry. Use Haser model for the calculation of production rates. You can give a column density first guess either using `sbpy.data.LTE` `cdensity_Bockelee` function, or user-defined into the data class. In this example, we'll use `cdensity_Bockelee` Since our data file contains 6 different data points of observation times and integrated flux, we can calculate production rates for all of these 6 data points using a python for loop. **IMPORTANT: Because we are using a for loop, and some of the values that should be appended to the `mol_data` phys object are calculated within the loop itself, we must initialize our columns within the phys object BEFORE performing the loop. This is because you cannot iteratively redefine the same column of data within a phys object, but you CAN change the value of an already defined column as many times as you want. Since our `beta`, `cdensity` and `total_number` values vary with every iteration, and since our production rate needs these values within the loop, we must simply change the value of our already defined columns for beta, column density, and total number everytime we iterate. Keep in mind when you initialize the column you must initialize it with the correct units and correct type (float, int, str). If you get an error saying there are duplicate columns, it is most likely due to what has been mentioned in this note, and you will have to reinitialize your mol_data object before trying to enter more data in** ``` nonlte = NonLTE() q_found_pyradex = [] lte = LTE() for i in range(0, 5): time = Time(co['Time'][i], format='iso') integrated_flux = co['T_B'][i] * u.K * u.km / u.s ephemobj = Ephem.from_horizons(target, epochs=time.jd) beta = beta_factor(mol_data, ephemobj) mol_data['beta'] = beta cdensity_bockelee = lte.cdensity_Bockelee(integrated_flux, mol_data) # col density first guess mol_data['cdensity'] = cdensity_bockelee cdensity = nonlte.from_pyradex(integrated_flux, mol_data) mol_data['cdensity'] = cdensity tnum = total_number(mol_data, aper, b) # total number of molecules in aperture mol_data['total_number'] = tnum Q = from_Haser(coma, mol_data, aper=aper) # production rate from Haser model q_found_pyradex.append(np.log10(Q.value)[0]) q_pred_co = list(co['log(Q)']) print("The Resulting Production Rates for CO in {} using Haser model are:\n {}".format(target, np.round(q_found_pyradex,3))) print("Residuals:\n{}".format(np.round((np.array(q_pred_co)) - (np.array(q_found_pyradex)),3))) print("Literature errors:\n{}".format(np.round(co['Q_error'],3))) time_co = list(co['Time']) time_co = matplotlib.dates.datestr2num(time_co) plt.plot_date(time_co, q_pred_co, 'o', color='slateblue', label='Wierzchos & Womack 2018') plt.plot_date(time_co, q_found_pyradex, 'o', color='hotpink', label='sbpy results') plt.xlabel('Time') plt.ylabel('Log (Q)') plt.legend(loc='best', fontsize='x-small') plt.title('Q vs Time Plot for CO in {}'.format(target)) plt.show() ``` It is clear that the implementation of pyradex gives better results than the example in [this notebook](LTE_prodrate_Haser.ipynb) in comparison to the 
literature, and it is done entirely within `sbpy` functionalities, offering the user a rigorous way to calculate column densities, and from those, production rates using the Haser model within `sbpy`. Even so, `sbpy` allows for flexibility in terms of data entry. Hardly traceable inconsistencies like are common in cometary studies, since a lot depends on the molecular catalogs that the data is obtained from, or what calculations are used. This is exactly why sbpy offers flexibility in all its functions through the use of `sbpy.data` classes, which allow the user to define their preferrred parameters if they do not happen to be satisfied with the catalog functionalities and derivations of parameters that sbpy offers. Yet we recommend the use of as many sbpy functionalities as possible in order to maintain consistency in your calculations, which may prove to be important when conversations about comet classification arise. Helpful Links ======= ___________________________________________ Relevant Notebooks ----------------- - [How to calculate LTE production rates without photolysis effects](LTE_prodrate_without_photolysis.ipynb) - [How to calculate LTE production rates with Haser model](LTE_prodrate_Haser.ipynb) - [How to use Phys data class and `from_jplspec`](../data/Phys.ipynb) - [What is `astroquery.jplspec`](../data/jplspec.ipynb) Relevant Links ------------- - [LAMDA Queries with astroquery](https://astroquery.readthedocs.io/en/latest/lamda/lamda.html) - [JPLSpec Queries with astroquery](https://astroquery.readthedocs.io/en/latest/jplspec/jplspec.html) - [sbpy Activity Haser Class](https://sbpy.readthedocs.io/en/latest/api/sbpy.activity.Haser.html#sbpy.activity.Haser) - [sbpy Ephem data class](https://sbpy.readthedocs.io/en/latest/sbpy/data/index.html#how-to-use-ephem) - [sbpy Phys data class](https://sbpy.readthedocs.io/en/latest/sbpy/data/index.html#how-to-use-phys) - [sbpy data class alternative field names](https://sbpy.readthedocs.io/en/latest/sbpy/data/fieldnames.html#list-of-alternative-field-names) - [pyradex source code](https://github.com/keflavich/pyradex) - [RADEX fortran source code](https://personal.sron.nl/~vdtak/radex/) - [RADEX homepage](https://personal.sron.nl/~vdtak/radex/index.shtml) - [sbpy citation (please cite our work)](http://joss.theoj.org/papers/10.21105/joss.01426)
github_jupyter
## Product Sentiment Data Data (public domain): https://data.world/crowdflower/brands-and-product-emotions Notebook code based on IMDB notebook from bert-sklearn/other_examples ``` import numpy as np import pandas as pd import os import sys import csv import re from sklearn import metrics from sklearn.metrics import classification_report from sklearn.utils import shuffle from ftfy import fix_text from bert_sklearn import BertClassifier from bert_sklearn import load_model print(os.getcwd()) DATAFILE = "./data/judge-cleaned-up.csv" # Prep Data def cleanup(txt): return fix_text(txt) converters = {'tweet_text': cleanup} raw_data = pd.read_csv(DATAFILE, converters=converters, encoding='unicode_escape') raw_data.head(10) ## Transform columns ## ONLY RUN THIS CELL ONCE!!! # Add columns to make the labels usable by the model # tweet_text => text # Positive / No emotion / Negative => 1, 0, -1 # Product: Apple stuff, Google stuff, NaN => Apple, Google, '' def clean_text(txt): return txt raw_data.insert(1, "text", np.vectorize(clean_text)(raw_data['tweet_text'])) def create_labels(sentiment): if sentiment.startswith('Positive'): return 1 if sentiment.startswith('Negative'): return -1 return 0 raw_data.insert(3, 'label', np.vectorize(create_labels)(raw_data['is_there_an_emotion_directed_at_a_brand_or_product'])) def get_company(product): if pd.isnull(product): return '' if 'iPad' in product or 'iPhone' in product or 'Apple' in product: return 'Apple' if 'Google' in product or 'Android' in product: return 'Google' return '' raw_data.insert(2, 'company', np.vectorize(get_company)(raw_data['emotion_in_tweet_is_directed_at'])) raw_data.head(10) # Last Data Preparation Step # Clean up characters and pull out columns of interest def clean(text): text = re.sub(r'<.*?>', '', text) text = re.sub(r"\"", "", text) return text data = raw_data.filter(['text', 'company', 'label'], axis=1) data['text'] = data['text'].transform(clean) # Split into training and test data msk = np.random.rand(len(data)) < 0.8 train = data[msk] test = data[~msk] print('Training data size: ' + str(train.shape)) print('Test data size: ' + str(test.shape)) train[:1].values ``` As you can see, each review is much longer than a sentence or two. The Google AI BERT models were trained on sequences of max length 512. Lets look at the performance for max_seq_length equal to 128, 256, and 512. ### max_seq_length = 128 ``` ## Set up data for the classifier train = train.sample(800) test = test.sample(500) print("Train data size: %d "%(len(train))) print("Test data size: %d "%(len(test))) X_train = train['text'] y_train = train['label'] X_test = test['text'] y_test = test['label'] ## Create the model model = BertClassifier(bert_model='bert-base-uncased', label_list=[-1,0,1]) model.max_seq_length = 128 model.learning_rate = 2e-05 model.epochs = 4 print(model) %%time ## Train the model using our data (this could take a while) model.fit(X_train, y_train) accy = model.score(X_test, y_test) %%time ## Test out the model with our own invented examples! examples = [ 'This Android product is not very good', 'I could not get that iPhone to work, so I sent it back. I''m really upset!', 'Another great product from the folks at Google! We really liked it a lot', 'My iPad is essential - of course I would buy another one!',' 'When in the course of human events it becomes necessary to dissolve those ties...', 'We the people, in order to form a more perfect union, establish justice, insure domestic tranquility, ...' 
]
print(model.predict_proba(examples))

model.save('models/model1_128_bb_uncased.mdl')
```
### max_seq_length = 256
```
%%time
## Don't use this one - it will take a very long time!
model = BertClassifier(bert_model='bert-base-uncased', label_list=[-1,0,1])
model.max_seq_length = 256
model.train_batch_size = 32
model.learning_rate = 2e-05
model.epochs = 4

print(model)

model.fit(X_train, y_train)
accy = model.score(X_test, y_test)
```
### max_seq_length = 512
```
%%time
## Don't use this one - it will take the longest of all!
model = BertClassifier(bert_model='bert-base-uncased', label_list=[-1,0,1])
model.max_seq_length = 512
# max_seq_length=512 will use a lot more GPU mem, so I am turning down batch size
# and adding gradient accumulation steps
model.train_batch_size = 16
model.gradient_accumulation_steps = 4
model.learning_rate = 2e-05
model.epochs = 4

print(model)

model.fit(X_train, y_train)
accy = model.score(X_test, y_test)
```
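Since `load_model` is already imported from `bert_sklearn`, the model saved in the 128-token section can be reloaded later without retraining. A minimal sketch, assuming the save cell above was run and the file path is unchanged; the example sentences are made up.
```
restored = load_model('models/model1_128_bb_uncased.mdl')

new_texts = [
    'The new Google tablet exceeded my expectations',
    'My iPhone battery died after one day and support was useless'
]
print(restored.predict(new_texts))
print(restored.predict_proba(new_texts))
```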
github_jupyter
``` from google.colab import drive drive.mount('/content/gdrive') import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import math ``` # Getting Data ``` data=pd.read_csv("/content/gdrive/MyDrive/kag_risk_factors_cervical_cancer.csv",dtype='object') data.head() ``` # Checking if there is any null value ``` data.isnull().sum() #Dropping unnecessary data data=data.drop(['STDs: Time since first diagnosis'], axis = 1) #replacing "?" with nan data = data.replace('?', np.nan) data = data.drop_duplicates() data = data.apply(pd.to_numeric, errors='coerce') data.head(13) data.isnull().sum() # for continuous variable data['Number of sexual partners'] = data['Number of sexual partners'].fillna(data['Number of sexual partners'].median()) data['First sexual intercourse'] = data['First sexual intercourse'].fillna(data['First sexual intercourse'].median()) data['Num of pregnancies'] = data['Num of pregnancies'].fillna(data['Num of pregnancies'].median()) data['Smokes'] = data['Smokes'].fillna(1) data['Smokes (years)'] = data['Smokes (years)'].fillna(data['Smokes (years)'].median()) data['Smokes (packs/year)'] = data['Smokes (packs/year)'].fillna(data['Smokes (packs/year)'].median()) data['Hormonal Contraceptives'] = data['Hormonal Contraceptives'].fillna(1) data['Hormonal Contraceptives (years)'] = data['Hormonal Contraceptives (years)'].fillna(data['Hormonal Contraceptives (years)'].median()) data['IUD'] = data['IUD'].fillna(0) # Under suggestion data['IUD (years)'] = data['IUD (years)'].fillna(0) #Under suggestion data['STDs'] = data['STDs'].fillna(1) data['STDs (number)'] = data['STDs (number)'].fillna(data['STDs (number)'].median()) data['STDs:condylomatosis'] = data['STDs:condylomatosis'].fillna(data['STDs:condylomatosis'].median()) data['STDs:cervical condylomatosis'] = data['STDs:cervical condylomatosis'].fillna(data['STDs:cervical condylomatosis'].median()) data['STDs:vaginal condylomatosis'] = data['STDs:vaginal condylomatosis'].fillna(data['STDs:vaginal condylomatosis'].median()) data['STDs:vulvo-perineal condylomatosis'] = data['STDs:vulvo-perineal condylomatosis'].fillna(data['STDs:vulvo-perineal condylomatosis'].median()) data['STDs:syphilis'] = data['STDs:syphilis'].fillna(data['STDs:syphilis'].median()) data['STDs:pelvic inflammatory disease'] = data['STDs:pelvic inflammatory disease'].fillna(data['STDs:pelvic inflammatory disease'].median()) data['STDs:genital herpes'] = data['STDs:genital herpes'].fillna(data['STDs:genital herpes'].median()) data['STDs:molluscum contagiosum'] = data['STDs:molluscum contagiosum'].fillna(data['STDs:molluscum contagiosum'].median()) data['STDs:AIDS'] = data['STDs:AIDS'].fillna(data['STDs:AIDS'].median()) data['STDs:HIV'] = data['STDs:HIV'].fillna(data['STDs:HIV'].median()) data['STDs:Hepatitis B'] = data['STDs:Hepatitis B'].fillna(data['STDs:Hepatitis B'].median()) data['STDs:HPV'] = data['STDs:HPV'].fillna(data['STDs:HPV'].median()) #for categorical data data = pd.get_dummies(data=data, columns=['Smokes','Hormonal Contraceptives','IUD','STDs', 'Dx:Cancer','Dx:CIN','Dx:HPV','Dx','Hinselmann','Citology','Schiller']) data.isnull().sum() ``` # Data Exploration ``` data.columns # 0 means not cancer affected and 1 means cancer affected cell data['Biopsy'].value_counts() data['Number of sexual partners'].value_counts() # Biopsy vs no. 
of sexual partners #categorical to categorical fig, (ax1,ax2) = plt.subplots(2, 1, figsize = (15, 8)) sns.countplot(x = 'Number of sexual partners', data = data, ax=ax1) sns.barplot(x = 'Number of sexual partners', y = 'Biopsy', data = data, ax=ax2) #continuous to categorical facet = sns.FacetGrid(data, hue='Biopsy',aspect=4) facet.map(sns.kdeplot,'Number of sexual partners',shade= True) facet.set(xlim=(0, data['Number of sexual partners'].max())) facet.add_legend() # Biopsy vs no. of sexual partners #categorical to categorical fig, (ax1,ax2) = plt.subplots(2, 1, figsize = (15, 8)) sns.countplot(x = 'Number of sexual partners', data = data, ax=ax1) sns.barplot(x = 'Number of sexual partners', y = 'Biopsy', data = data, ax=ax2) #continuous to categorical facet = sns.FacetGrid(data, hue='Biopsy',aspect=4) facet.map(sns.kdeplot,'Number of sexual partners',shade= True) facet.set(xlim=(0, data['Number of sexual partners'].max())) facet.add_legend() # biopsy vs no. of pregnancies sns.factorplot('Num of pregnancies','Biopsy',data = data, size=5, aspect=3) #continuous to categorical facet = sns.FacetGrid(data, hue='Biopsy', aspect=4) facet.map(sns.kdeplot,'Num of pregnancies', shade= True) facet.set(xlim=(0, data['Num of pregnancies'].max())) facet.add_legend() # list the heatmap of top correlation corr = data.corr() # number of variables for heatmap k = 15 cols = corr.nlargest(k, 'Biopsy')['Biopsy'].index cm = np.corrcoef(data[cols].values.T) plt.figure(figsize=(12, 10)) sns.set(font_scale=1.25) sns.heatmap(cm, cbar = True, annot = True, square = True, annot_kws = {'size': 10}, yticklabels = cols.values, xticklabels = cols.values) plt.show() data.describe() data.info() data.describe() data.head(75) x = data.iloc[:,:46] y = data.iloc[:,45] print(x.shape) print(y.shape) # with the following function we can select highly correlated features # it will remove the first feature that is correlated with anything other feature def correlation(dataset, threshold): col_corr = set() # Set of all the names of correlated columns corr_matrix = dataset.corr() for i in range(len(corr_matrix.columns)): for j in range(i): if abs(corr_matrix.iloc[i, j]) > threshold: # we are interested in absolute coeff value colname = corr_matrix.columns[i] # getting the name of column col_corr.add(colname) return col_corr corr_features = correlation(data, 0.7) len(set(corr_features)) corr_features # Selected features y = np.array(data['Biopsy']) x = data.drop(['Age','Number of sexual partners','First sexual intercourse','Num of pregnancies', 'Smokes (years)','Smokes_0.0','Hormonal Contraceptives (years)','IUD (years)','STDs (number)', 'STDs:cervical condylomatosis','STDs:vaginal condylomatosis','STDs:syphilis','STDs:pelvic inflammatory disease', 'STDs:genital herpes','STDs:molluscum contagiosum','STDs:AIDS','STDs:HIV','STDs:Hepatitis B','STDs:HPV', 'STDs: Time since last diagnosis','Smokes_0.0','Hormonal Contraceptives_0.0', 'STDs_0.0','Dx:Cancer_0','Dx:CIN_0','Dx_0','Hinselmann_0','Citology_0'], axis = 1 ).iloc[:] ``` # Traning & Test Dataset ``` # splitting the dataset into training and test set from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.4, random_state = 45) print(x_train.shape) print(y_train.shape) print(x_test.shape) print(y_test.shape) # MinMaxScaling from sklearn.preprocessing import MinMaxScaler # creating a minmax scaler mm = MinMaxScaler() # feeding the independent data into the scaler x_train = mm.fit_transform(x_train) x_test = 
mm.fit_transform(x_test) ``` # Applying machine learning algorithms # Logistic Regression ``` from sklearn.linear_model import LogisticRegression from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report # creating the model lr_clf = LogisticRegression() # feeding the training data into the model lr_clf.fit(x_train, y_train) # predicting the test set results y_pred = model.predict(x_test) # Calculating the accuracies print("Training accuracy :", model.score(x_train, y_train)) print("Testing accuracy :", model.score(x_test, y_test)) # classification report print(classification_report(y_test, y_pred)) # confusion matrix print(confusion_matrix(y_test, y_pred)) ``` # Logistic Regression using Hyperparameter Tuning ``` from sklearn.model_selection import GridSearchCV params = {"C": np.logspace(-4, 4, 20), "solver": ["liblinear"]} lr_clf = LogisticRegression() lr_cv = GridSearchCV(lr_clf, params, scoring="accuracy", n_jobs=-1, verbose=1, cv=5, iid=True) lr_cv.fit(x_train, y_train) best_params = lr_cv.best_params_ print(f"Best parameters: {best_params}") lr_clf = LogisticRegression(**best_params) lr_clf.fit(x_train, y_train) # predicting the test set results y_pred =lr_clf.predict(x_test) # Calculating the accuracies print("Training accuracy :", lr_clf.score(x_train, y_train)) print("Testing accuracy :", lr_clf.score(x_test, y_test)) # classification report print(classification_report(y_test, y_pred)) # confusion matrix print(confusion_matrix(y_test, y_pred)) import pickle file='cervical_tuned_LR.pkl' with open(file,'wb') as f: pickle.dump(lr_clf,f) ``` # Support Vector Classifier ``` from sklearn.svm import SVC # creating the model svm_clf = SVC() # feeding the training data into the model svm_clf.fit(x_train, y_train) # predicting the test set results y_pred = svm_clf.predict(x_test) # Calculating the accuracies print("Training accuracy :", svm_clf.score(x_train, y_train)) print("Testing accuracy :", svm_clf.score(x_test, y_test)) # classification report print(classification_report(y_test, y_pred)) # confusion matrix print(confusion_matrix(y_test, y_pred)) import pickle file='cervical_tuned_SVC.pkl' with open(file,'wb') as f: pickle.dump(svm_clf,f) ``` # Random Forest Classifier ``` from sklearn.ensemble import RandomForestClassifier # creating the model rf_clf = RandomForestClassifier() # feeding the training data into the model rf_clf.fit(x_train, y_train) # predicting the test set results y_pred = rf_clf.predict(x_test) # Calculating the accuracies print("Training accuracy :", rf_clf.score(x_train, y_train)) print("Testing accuracy :", rf_clf.score(x_test, y_test)) # classification report print(classification_report(y_test, y_pred)) # confusion matrix print(confusion_matrix(y_test, y_pred)) import pickle file='cervical_tuned_RF.pkl' with open(file,'wb') as f: pickle.dump(rf_clf,f) ``` # AdaBoost Classifier ``` from sklearn.ensemble import AdaBoostClassifier # creating the model adb_clf = AdaBoostClassifier() # feeding the training data into the model adb_clf.fit(x_train, y_train) # predicting the test set results y_pred = adb_clf.predict(x_test) # Calculating the accuracies print("Training accuracy :", adb_clf.score(x_train, y_train)) print("Testing accuracy :", adb_clf.score(x_test, y_test)) # classification report print(classification_report(y_test, y_pred)) # confusion matrix print(confusion_matrix(y_test, y_pred)) import pickle file='cervical_tuned_ADB.pkl' with open(file,'wb') as f: pickle.dump(adb_clf,f) test_score = accuracy_score(y_test, 
lr_clf.predict(x_test)) * 100 train_score = accuracy_score(y_train, lr_clf.predict(x_train)) * 100 results_df = pd.DataFrame(data=[["Logistic Regression", train_score, test_score]], columns=['Model', 'Training Accuracy %', 'Testing Accuracy %']) test_score = accuracy_score(y_test, svm_clf.predict(x_test)) * 100 train_score = accuracy_score(y_train, svm_clf.predict(x_train)) * 100 df=pd.DataFrame(data=[["Support Vector Classifier", train_score, test_score]], columns=['Model', 'Training Accuracy %', 'Testing Accuracy %']) results_df = results_df.append(df, ignore_index=True) test_score = accuracy_score(y_test, rf_clf.predict(x_test)) * 100 train_score = accuracy_score(y_train, rf_clf.predict(x_train)) * 100 df=pd.DataFrame(data=[["Random Forest Classifier", train_score, test_score]], columns=['Model', 'Training Accuracy %', 'Testing Accuracy %']) results_df = results_df.append(df, ignore_index=True) test_score = accuracy_score(y_test, adb_clf.predict(x_test)) * 100 train_score = accuracy_score(y_train, adb_clf.predict(x_train)) * 100 df=pd.DataFrame(data=[["AdaBoost Classifier", train_score, test_score]], columns=['Model', 'Training Accuracy %', 'Testing Accuracy %']) results_df = results_df.append(df, ignore_index=True) results_df ```
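The comparison table can also be plotted to make the train/test gap per model easier to see. A minimal sketch using the seaborn and matplotlib imports from the top of the notebook (`accuracy_score` used in the cells above comes from `sklearn.metrics`).
```
melted = results_df.melt(id_vars='Model',
                         value_vars=['Training Accuracy %', 'Testing Accuracy %'],
                         var_name='Split', value_name='Accuracy')

plt.figure(figsize=(10,5))
sns.barplot(x='Model', y='Accuracy', hue='Split', data=melted)
plt.xticks(rotation=15)
plt.title('Training vs Testing Accuracy by Model')
plt.show()
```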
github_jupyter
# Before we begin * Github (Github Education) * Bitbucket * Kaggle # Introduction In this week, we want to implement an Ant Colony Optimization Algorithm to solve Travelling Sales Man problem. ``` import random import math import operator import matplotlib.pyplot as plt ``` # Content * Travelling Sales Man Problem * Helper Functions * Cost & Pheromone Graph * Designing Ants * Designing ACO * Running # Travelling Sales Man Problem(TSP) The travelling salesman problem (TSP) asks the following question: "Given a list of cities and the distances between each pair of cities, what is the shortest possible route that visits each city and returns to the origin city?" It is an NP-hard problem in combinatorial optimization, important in operations research and theoretical computer science. ![TSP Sample](figs/tsp.png) # Helper Functions \begin{align} Distance = \sqrt{ (y_2 - y_1)^2 + (x_2 - x_1)^2} \end{align} ``` def distance(city1: dict, city2: dict): return math.sqrt((city1['x'] - city2['x']) ** 2 + (city1['y'] - city2['y']) ** 2) def plot(points, path: list): x = [] y = [] for point in points: x.append(point[0]) y.append(point[1]) y = list(map(operator.sub, [max(y) for i in range(len(points))], y)) # for better visualization plt.plot(x, y, 'co') for k in range(1, len(path)): i = path[k - 1] # index of first city j = path[k] # index of next city plt.arrow(x[i], y[i], x[j] - x[i], y[j] - y[i], color='r', length_includes_head=True) plt.xlim(0, max(x) * 1.1) plt.ylim(0, max(y) * 1.1) plt.show() ``` # Prerequisites ### ACO Algorihtm ![ACO Algorithm](figs/aco_algorithm.PNG) ### Strategy ![Q Updating](figs/strategy.PNG) With $Q \in [0,1]$ ### Rho \begin{align} T_{ij}(t) \leftarrow rho * T_{ij}(t) \end{align} With $rho \in [0,1]$ ### Transition Probability ![Transition Probability](figs/transition_prob.PNG) With $\alpha, \beta \in [0,1]$ # Cost & Pheromone Graph The graph is a data structure that has the matrices that we needed to evaluate transition probability: ``` class Graph(object): def __init__(self, cost_matrix: list, rank: int): """ :param cost_matrix: :param rank: rank of the cost matrix """ self.matrix = cost_matrix self.rank = rank # noinspection PyUnusedLocal self.pheromone = [[1 / (rank * rank) for j in range(rank)] for i in range(rank)] ``` # Designing Ants ``` class Ant(object): def __init__(self, aco, graph: Graph): self.colony = aco self.graph = graph self.total_cost = 0.0 self.path = [] # path self.pheromone_delta = [] # the local increase of pheromone self.allowed = [i for i in range(graph.rank)] # nodes which are allowed for the next selection self.eta = [[0 if i == j else 1 / graph.matrix[i][j] for j in range(graph.rank)] \ for i in range(graph.rank)] # heuristic information for calculating start = random.randint(0, graph.rank - 1) # start from any node self.path.append(start) self.current = start self.allowed.remove(start) def select_next(self): denominator = 0 for i in self.allowed: denominator += self.graph.pheromone[self.current][i] ** self.colony.alpha \ * self.eta[self.current][i] ** self.colony.beta # noinspection PyUnusedLocal probabilities = [0 for i in range(self.graph.rank)] # probabilities for moving to a node in the next step for i in range(self.graph.rank): try: self.allowed.index(i) # test if allowed list contains i probabilities[i] = self.graph.pheromone[self.current][i] ** self.colony.alpha * \ self.eta[self.current][i] ** self.colony.beta / denominator except ValueError: pass # do nothing # select next node by probability roulette selected = 0 rand = random.random() 
for i, probability in enumerate(probabilities): rand -= probability if rand <= 0: selected = i break self.allowed.remove(selected) self.path.append(selected) self.total_cost += self.graph.matrix[self.current][selected] self.current = selected # noinspection PyUnusedLocal def update_pheromone_delta(self): self.pheromone_delta = [[0 for j in range(self.graph.rank)] for i in range(self.graph.rank)] for k in range(1, len(self.path)): i = self.path[k - 1] j = self.path[k] if self.colony.update_strategy == 1: # ant-quality system self.pheromone_delta[i][j] = self.colony.Q elif self.colony.update_strategy == 2: # ant-density system # noinspection PyTypeChecker self.pheromone_delta[i][j] = self.colony.Q / self.graph.matrix[i][j] else: # ant-cycle system self.pheromone_delta[i][j] = self.colony.Q / self.total_cost ``` # Designing ACO ``` class ACO(object): def __init__(self, ant_count: int, generations: int, alpha: float, beta: float, rho: float, q: int, strategy: int): """ :param ant_count: :param generations: :param alpha: relative importance of pheromone :param beta: relative importance of heuristic information :param rho: pheromone residual coefficient :param q: pheromone intensity :param strategy: pheromone update strategy. 0 - ant-cycle, 1 - ant-quality, 2 - ant-density """ self.Q = q self.rho = rho # Evapuration Rate self.beta = beta self.alpha = alpha self.ant_count = ant_count self.generations = generations self.update_strategy = strategy def _update_pheromone(self, graph: Graph, ants: list): for i, row in enumerate(graph.pheromone): for j, col in enumerate(row): graph.pheromone[i][j] *= self.rho # Evapuration for ant in ants: graph.pheromone[i][j] += ant.pheromone_delta[i][j] def solve(self, graph: Graph): """ :param graph: """ best_cost = float('inf') best_solution = [] for gen in range(self.generations): # noinspection PyUnusedLocal ants = [Ant(self, graph) for i in range(self.ant_count)] for ant in ants: for i in range(graph.rank - 1): ant.select_next() ant.total_cost += graph.matrix[ant.path[-1]][ant.path[0]] if ant.total_cost < best_cost: best_cost = ant.total_cost best_solution = [] + ant.path # update pheromone ant.update_pheromone_delta() self._update_pheromone(graph, ants) print('generation #{}, best cost: {}, path: {}'.format(gen, best_cost, best_solution)) return best_solution, best_cost ``` # Running ``` def main(): # Loading Data from files cities = [] points = [] with open('./data/chn31.txt') as f: for line in f.readlines(): city = line.split(' ') cities.append(dict(index=int(city[0]), x=int(city[1]), y=int(city[2]))) points.append((int(city[1]), int(city[2]))) # Calculating Cost matrix => distance between city i and j cost_matrix = [] rank = len(cities) for i in range(rank): row = [] for j in range(rank): row.append(distance(cities[i], cities[j])) cost_matrix.append(row) # Instaniate ACO, and Run aco = ACO(10, 100, 1.0, 10.0, 0.5, 10, 2) graph = Graph(cost_matrix, rank) path, cost = aco.solve(graph) print('cost: {}, path: {}'.format(cost, path)) # Ploting the best cycle found plot(points, path) if __name__ == '__main__': main() ```
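How sensitive the result is to `alpha`, `beta` and `rho` can be checked by re-running the solver with different settings. A minimal sketch on a small random instance; the synthetic cities below are a stand-in so the `./data/chn31.txt` file is not required, and the parameter pairs are arbitrary.
```
import random

random.seed(1)
# a small synthetic instance: 15 random cities on a 100x100 grid
cities = [dict(index=i, x=random.randint(0, 100), y=random.randint(0, 100)) for i in range(15)]
cost_matrix = [[distance(ci, cj) for cj in cities] for ci in cities]

for alpha, beta, rho in [(1.0, 10.0, 0.5), (2.0, 5.0, 0.3)]:
    aco = ACO(ant_count=10, generations=30, alpha=alpha, beta=beta, rho=rho, q=10, strategy=2)
    best_path, best_cost = aco.solve(Graph(cost_matrix, len(cities)))
    print('alpha={}, beta={}, rho={} -> best cost {:.1f}'.format(alpha, beta, rho, best_cost))
```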
github_jupyter
``` %pylab inline ``` # Drawing random numbers in Python ## 1. Drawing using the rectangular distribution The prerequisite for drawing from a probability distribution is the ability to draw randomly from the rectangular or uniform distribution on $(0,1)$. For any other distribution, draws can be generated by 1) draw $\xi$ randomly from the uniform distribution 2) evaluate the inverse cumulative distribution function $G^{-1}(x)$ at $\xi$ ### Implementation in Python Uniform numbers in Python are drawn by ``` python import numpy as np xi = np.random.rand(size) ``` Standard normally distributed values ```python xi = np.random.randn(size) ``` #### Example ```python import numpy as np np.random.randn(100) np.random.rand(100,10) ``` Probability distributions are implemented in _scipy_ with inverse cumulative distributions being implemented as **ppf** for the individual probability distributions: ``` python import scipy.stats as stats # normal distribution stats.norm.ppf(q, loc = 0, scale = 1) # gamma distribution stats.gamma.ppf(q, a, loc = 0, scale = 1) # t-distribution stats.t.ppf(q, dof, loc = 0, scale = 1) # poisson distribution stats.poisson.ppf(q, mu, loc = 0) ``` ### Exercise 1.1 Using the rectangular distribution, draw 1000 random numbers from - normal distribution with mean $mu=0.2$ and standard deviation $\sigma=0.1$ - gamma distribution with shape parameter $a=2.5$ and scale parameter $s=0.2$ - t-distribution with 5 degrees of freedom, located around $3.5$ and with scale $s=0.8$ Plot a histogram for each outcome. ``` from numpy.random import rand import scipy.stats as stats ``` ## 2. Drawing using the built-in generator functions The **scipy.stats** package provides over 90 different probability distributions, each with its own random number generating function. The basic usage is 1) Import the **scipy.stats** package ``` python import scipy.stats as stats ``` 2) Call the **rvs** function of the sought probalitity distribution with size as keyword argument ``` python xi = stats.norm.rvs(size=1000) xi = stats.gamma.rvs(a, size=1000) xi = stats.t.rvs(dof, size=1000) ``` The optional keyword parameters for each distribution correspond to those of the call for the inverse cumulative distribution function. ### Exercise 1.2 Repeat the random number generation from Exercise 1.1, but now use the built-in **rvs** function for each example. ### Curvilinear trapezoidal distribution To sample from CTrap(a, b, d), make two draws $r_1$ and $r_2$ independently from the standard rectangular distribution $R(0, 1)$ and form $$ a_s = (a − d) + 2dr_1 \qquad b_s = (a+b)-a_s , $$ and $$ \xi = a_s + (b_s − a_s)r_2 . $$ In this way $a_s$ is a draw from the rectangular distribution with limits $a \pm d$. $b_s$ is then formed to ensure that the midpoint of $a_s$ and $b_s$ is the prescribed value $x = (a + b)/2$. ### Task A certificate states that a voltage X lies in the interval 10.0 V ± 0.1 V. No other information is available concerning X, except that it is believed that the magnitude of the interval endpoints is the result of rounding correctly some numerical value. On this basis, that numerical value lies between 0.05 V and 0.15 V, since the numerical value of every point in the interval (0.05, 0.15) rounded to one significant decimal digit is 0.1. The location of the interval can therefore be regarded as fixed, whereas its width is inexact. The best estimate of X is x = 10.0 V. 
Based on a = 9.9 V, b = 10.1 V and d = 0.05 V, sample from the PDF and calculate the best estimate and the associated uncertainty. ``` a = 9.9 b = 10.1 d = 0.05 ```
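One way to complete the exercise cell above is to follow the CTrap recipe literally. This is only a sketch: the number of draws and the use of the sample standard deviation as the associated uncertainty are choices made here, not prescribed by the task.

```
import numpy as np

n_draws = 100000                  # Monte Carlo sample size (an arbitrary choice)
r1 = np.random.rand(n_draws)
r2 = np.random.rand(n_draws)

a_s = (a - d) + 2 * d * r1        # lower limit, drawn from R(a - d, a + d)
b_s = (a + b) - a_s               # upper limit, keeps the midpoint at (a + b) / 2
xi = a_s + (b_s - a_s) * r2       # draws from CTrap(a, b, d)

best_estimate = np.mean(xi)       # should be close to 10.0 V
uncertainty = np.std(xi, ddof=1)  # associated standard uncertainty
print(best_estimate, uncertainty)
```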
github_jupyter
``` import numpy as np import pandas as pd import seaborn as sns %matplotlib notebook import matplotlib.pyplot as plt import matplotlib matplotlib.style.use('ggplot') ``` import data and drop NAs, calculate metascore/10 and rating*10 ``` imdb = pd.read_csv("C:\\Users\\Adam\\Google Drive\\School\\ComputerScience\\intro to data science\\rotten_needles\\data\\datasets\\movies_dataset.csv") #imdb = imdb.dropna() imdb = imdb.assign(rating10=(imdb['rating']*10)) imdb = imdb.assign(metascore10=(imdb['metascore']/10)) ``` create movie profit score column ``` imdb = imdb.assign(score1=100*(imdb.gross_income-imdb.budget)/imdb.budget) imdb = imdb.assign(score2=(imdb['gross_income']-imdb['budget'])) # best score measure imdb = imdb.assign(score3=np.log(imdb['gross_income'])/np.log(imdb['budget'])) # imdb[['score2', 'name','rating','metascore']].sort_values('score2',ascending=0) ``` # Figure shows scatter of gross income against meta score and imdb rating ``` plt.figure() imdb_temp = imdb imdb_temp['scaled_gross_income'] = np.log(imdb['gross_income']) # / 1000000 sns.regplot(x = imdb['rating']*10, y = 'scaled_gross_income', data = imdb_temp, color = 'yellow') sns.regplot(x = imdb['metascore'], y = 'scaled_gross_income', data = imdb_temp, color = 'Green') sns.plt.title("Gross Income against MetaScore \ IMDB Rating - Scatter") sns.plt.xlabel("IMDB Rating, Metascore") sns.plt.ylabel("Log of Gross Income") # legend_patches = matplotlib.patches.Patch(color='green', label='label') # Plot the legend sns.plt.legend(['IMDB Ratings', 'Metascore']) # imdb.isnull().sum() ``` # Figure shows distribution of Movie Ratings ``` plt.figure() sns.countplot(x = 'rating', data = imdb) plt.xticks(rotation=60) sns.plt.title("Distribution of Movie Ratings") sns.plt.xlabel("Movie Rating") sns.plt.ylabel("Count of Ratings") ``` # Distribution of ratings by Genres ``` temp = pd.DataFrame( data = { 'type': [i for i in range(1,11) for genre in imdb.columns if 'genre' in genre], 'votes': [imdb[imdb[genre] == 1]['rating_freq.1'].mean() for genre in imdb.columns if 'genre' in genre] + [imdb[imdb[genre] == 1]['rating_freq.2'].mean() for genre in imdb.columns if 'genre' in genre] + [imdb[imdb[genre] == 1]['rating_freq.3'].mean() for genre in imdb.columns if 'genre' in genre] + [imdb[imdb[genre] == 1]['rating_freq.4'].mean() for genre in imdb.columns if 'genre' in genre] + [imdb[imdb[genre] == 1]['rating_freq.5'].mean() for genre in imdb.columns if 'genre' in genre] + [imdb[imdb[genre] == 1]['rating_freq.6'].mean() for genre in imdb.columns if 'genre' in genre] + [imdb[imdb[genre] == 1]['rating_freq.7'].mean() for genre in imdb.columns if 'genre' in genre] + [imdb[imdb[genre] == 1]['rating_freq.8'].mean() for genre in imdb.columns if 'genre' in genre] + [imdb[imdb[genre] == 1]['rating_freq.9'].mean() for genre in imdb.columns if 'genre' in genre] + [imdb[imdb[genre] == 1]['rating_freq.10'].mean() for genre in imdb.columns if 'genre' in genre] }, index= [genre[genre.rfind('.')+1:] for genre in imdb.columns if 'genre' in genre]*10 ) plt.figure() sns.barplot(x = temp.index , y = 'votes',hue = 'type', data = temp) plt.xticks(rotation=45, ha='right') sns.plt.title("Distribution of Ratings by Genres") sns.plt.xlabel("Genres") sns.plt.ylabel("Number of Votes") ``` scattering stuff ``` # plt.figure() # plt.ylim([0,10]) # plt.xlim([0,10]) # sns.regplot(x ='avg_rating_per_demo.aged_under_18', y = 'avg_rating_per_demo.aged_45+', data = imdb, color = 'red') # plt.figure() # plt.ylim([0,10]) # plt.xlim([0,10]) # sns.regplot(x 
='avg_rating_per_demo.aged_18-29', y = 'avg_rating_per_demo.aged_45+', data = imdb, color = 'green') # imdb.plot(kind='scatter', x='rating', y='avg_rating_per_demo.us_users'); ``` # Figure shows high correlation between opening weekend incomes and Total weekend ``` plt.figure() sns.regplot(x = 'opening_weekend_income', y = 'gross_income', data=imdb, color='seagreen') sns.plt.title("Opening weeked Incomes vs Total Incomes") sns.plt.xlabel("Opening Weekend") sns.plt.ylabel("Total") ``` correlations ``` # imdb[['metascore','critic_review_count','rating','rating_count','gross_income','rating_freq.3','rating_freq.4','rating_freq.5','rating_freq.6', # 'rating_freq.7','rating_freq.8','rating_freq.9','score2']].corr() # imdb[['avg_rating_per_demo.males','avg_rating_per_demo.females']].corr() ``` # figure shows how different age groups tend to vote the same, the diagonal shows the rating distribution of each age group ``` from pandas.tools.plotting import scatter_matrix temp = imdb[['avg_rating_per_demo.aged_under_18','avg_rating_per_demo.aged_18-29', 'avg_rating_per_demo.aged_30-44','avg_rating_per_demo.aged_45+']] temp.columns = ['-18','18-29','30-44','45+'] scatter_matrix(temp, alpha=0.2,figsize=(6,6)) plt.suptitle('Rating Scatter over Different Age Groups') ``` # figure shows that above 400K voters, the average rating is allways greater than 7 - people tend to rate when they like a movie ``` plt.figure() sns.regplot(x = 'rating_count', y = 'rating', data=imdb, color='seagreen') sns.plt.title("IMDB Rating vs Number of Votes") sns.plt.xlabel("Number of Votes") sns.plt.ylabel("IMDB Rating") ``` # figure shows the difference of males and females number of votes over different genres ``` temp = pd.DataFrame( data={ 'sex': ['Male' for genre in imdb.columns if 'genre' in genre] + ['Female' for genre in imdb.columns if 'genre' in genre], 'score': [ imdb[imdb[genre] == 1]['votes_per_demo.males'].mean() for genre in imdb.columns if 'genre' in genre ] + [ imdb[imdb[genre] == 1]['votes_per_demo.females'].mean() for genre in imdb.columns if 'genre' in genre ] }, index= [genre[genre.rfind('.')+1:] for genre in imdb.columns if 'genre' in genre] + [genre[genre.rfind('.')+1:] for genre in imdb.columns if 'genre' in genre] ) plt.figure() sns.barplot(x = temp.index , y = 'score',hue = 'sex', data = temp) plt.xticks(rotation=45, ha='right') sns.plt.title("Number of Votes, Difference between Male and Female") sns.plt.xlabel("Genres") sns.plt.ylabel("Number of Votes") ``` # figure shows the similarity of males and females average scores over different genres - women are more mefargenot! 
``` temp1 = pd.DataFrame( data={ 'sex': ['Male' for genre in imdb.columns if 'genre' in genre] + ['Female' for genre in imdb.columns if 'genre' in genre], 'score': [ imdb[imdb[genre] == 1]['avg_rating_per_demo.males'].mean() for genre in imdb.columns if 'genre' in genre ] + [ imdb[imdb[genre] == 1]['avg_rating_per_demo.females'].mean() for genre in imdb.columns if 'genre' in genre ] }, index= [genre[genre.rfind('.')+1:] for genre in imdb.columns if 'genre' in genre] + [genre[genre.rfind('.')+1:] for genre in imdb.columns if 'genre' in genre] ) plt.figure() sns.barplot(x = temp1.index , y = 'score',hue = 'sex', data = temp1) plt.xticks(rotation=45, ha='right') sns.plt.title("Average Ratings, Difference between Male and Female") sns.plt.xlabel("Genres") sns.plt.ylabel("Average Rating") # plt.figure() # plt.ylim([0,10]) # plt.xlim([0,10]) # sns.regplot(x ='avg_rating_per_demo.males', y = 'avg_rating_per_demo.females', data = imdb, color = 'red') ``` # figure shows retrun on investment (gross income divided by budget) ``` temp2 = pd.DataFrame( data={ 'score': [ imdb[imdb[genre] == 1]['score1'].mean() for genre in imdb.columns if 'genre' in genre ] }, index= [genre[genre.rfind('.')+1:] for genre in imdb.columns if 'genre' in genre] ) plt.figure() sns.barplot(x = temp2.index , y = 'score', data = temp2) plt.xticks(rotation=45, ha='right') sns.plt.title("Return on Investment by Genre") sns.plt.xlabel("Genres") sns.plt.ylabel("Roi %") ```
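All of the per-genre aggregations above repeat the same list-comprehension pattern over the one-hot genre columns. A small helper that captures that pattern is sketched below; it assumes, as the cells above do, that genre membership is stored in one-hot columns whose names contain 'genre'.

```
def genre_means(df, value_col):
    # Mean of value_col for each one-hot genre column (column names containing 'genre'),
    # indexed by the genre name after the last dot, as in the plots above.
    genre_cols = [c for c in df.columns if 'genre' in c]
    return pd.Series(
        {c[c.rfind('.') + 1:]: df.loc[df[c] == 1, value_col].mean() for c in genre_cols}
    )

# e.g. genre_means(imdb, 'score1') reproduces the values behind the ROI-by-genre chart
```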
github_jupyter
# Assignment 1: Bandits and Exploration/Exploitation Welcome to Assignment 1. This notebook will: - Help you create your first bandit algorithm - Help you understand the effect of epsilon on exploration and learn about the exploration/exploitation tradeoff - Introduce you to some of the reinforcement learning software we are going to use for this specialization This class uses RL-Glue to implement most of our experiments. It was originally designed by Adam White, Brian Tanner, and Rich Sutton. This library will give you a solid framework to understand how reinforcement learning experiments work and how to run your own. If it feels a little confusing at first, don't worry - we are going to walk you through it slowly and introduce you to more and more parts as you progress through the specialization. We are assuming that you have used a Jupyter notebook before. But if not, it is quite simple. Simply press the run button, or shift+enter to run each of the cells. The places in the code that you need to fill in will be clearly marked for you. ## Section 0: Preliminaries ``` # Import necessary libraries %matplotlib inline import numpy as np import matplotlib.pyplot as plt from rl_glue import RLGlue import main_agent import ten_arm_env import test_env from tqdm import tqdm import time ``` In the above cell, we import the libraries we need for this assignment. We use numpy throughout the course and occasionally provide hints for which methods to use in numpy. Other than that we mostly use vanilla python and the occasional other library, such as matplotlib for making plots. You might have noticed that we import ten_arm_env. This is the __10-armed Testbed__ introduced in [section 2.3](http://www.incompleteideas.net/book/RLbook2018.pdf) of the textbook. We use this throughout this notebook to test our bandit agents. It has 10 arms, which are the actions the agent can take. Pulling an arm generates a stochastic reward from a Gaussian distribution with unit-variance. For each action, the expected value of that action is randomly sampled from a normal distribution, at the start of each run. If you are unfamiliar with the 10-armed Testbed please review it in the textbook before continuing. __DO NOT IMPORT OTHER LIBRARIES as this will break the autograder.__ __DO NOT SET A RANDOM SEED as this will break the autograder.__ ## Section 1: Greedy Agent We want to create an agent that will find the action with the highest expected reward. One way an agent could operate is to always choose the action with the highest value based on the agent’s current estimates. This is called a greedy agent as it greedily chooses the action that it thinks has the highest value. Let's look at what happens in this case. First we are going to implement the argmax function, which takes in a list of action values and returns an action with the highest value. Why are we implementing our own instead of using the argmax function that numpy uses? Numpy's argmax function returns the first instance of the highest value. We do not want that to happen as it biases the agent to choose a specific action in the case of ties. Instead we want to break ties between the highest values randomly. So we are going to implement our own argmax function. You may want to look at [np.random.choice](https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.choice.html) to randomly select from a list of values. ``` # [Graded] def argmax(q_values): """ Takes in a list of q_values and returns the index of the item with the highest value. 
Breaks ties randomly. returns: int - the index of the highest value in q_values """ top = float("-inf") ties = [] for i in range(len(q_values)): # if a value in q_values is greater than the highest value, then update top and reset ties to zero # if a value is equal to top value, then add the index to ties (hint: do this no matter what) # Note: You do not have to follow this exact solution. You can choose to do your own implementation. ### START CODE HERE ### if q_values[i] > top: top, ties = q_values[i], [i] elif q_values[i] == top: ties.append(i) ### END CODE HERE ### # return a random selection from ties. (hint: look at np.random.choice) ### START CODE HERE ### ind = np.random.choice(ties) ### END CODE HERE ### return ind # change this # Test argmax implentation test_array = [0, 0, 0, 0, 0, 0, 0, 0, 1, 0] assert argmax(test_array) == 8, "Check your argmax implementation returns the index of the largest value" test_array = [1, 0, 0, 1] total = 0 for i in range(100): total += argmax(test_array) np.save("argmax_test", total) assert total > 0, "Make sure your argmax implementation randomly choooses among the largest values. Make sure you are not setting a random seed (do not use np.random.seed)" assert total != 300, "Make sure your argmax implementation randomly choooses among the largest values." ``` Now we introduce the first part of an RL-Glue agent that you will implement. Here we are going to create a GreedyAgent and implement the agent_step method. This method gets called each time the agent takes a step. The method has to return the action selected by the agent. This method also ensures the agent’s estimates are updated based on the signals it gets from the environment. Fill in the code below to implement a greedy agent. ``` # Greedy agent here [Graded] class GreedyAgent(main_agent.Agent): def agent_step(self, reward, observation): """ Takes one step for the agent. It takes in a reward and observation and returns the action the agent chooses at that time step. Arguments: reward -- float, the reward the agent received from the environment after taking the last action. observation -- float, the observed state the agent is in. Do not worry about this for this assignment as you will not use it until future lessons. Returns: current_action -- int, the action chosen by the agent at the current time step. """ ### Useful Class Variables ### # self.q_values : An array with the agent’s value estimates for each action. # self.arm_count : An array with a count of the number of times each arm has been pulled. # self.last_action : The action that the agent took on the previous time step. ####################### # Update action values. Hint: Look at the algorithm in section 2.4 of the textbook. # Increment the counter in self.arm_count for the action from the previous time step # Update the step size using self.arm_count # Update self.q_values for the action from the previous time step # (~3-5 lines) ### START CODE HERE ### self.arm_count[self.last_action] += 1 self.q_values[self.last_action] += (reward - self.q_values[self.last_action]) / self.arm_count[self.last_action] ### END CODE HERE ### # current action = ? 
# Use the argmax function you created above # (~2 lines) ### START CODE HERE ### current_action = argmax(self.q_values) ### END CODE HERE ### self.last_action = current_action return current_action # Do not modify this cell # Test for Greedy Agent Code greedy_agent = GreedyAgent() greedy_agent.q_values = [0, 0, 1.0, 0, 0] greedy_agent.arm_count = [0, 1, 0, 0, 0] greedy_agent.last_action = 1 action = greedy_agent.agent_step(1, 0) print(greedy_agent.q_values) np.save("greedy_test", greedy_agent.q_values) print("Output:") print(greedy_agent.q_values) print("Expected Output:") print([0, 0.5, 1.0, 0, 0]) assert action == 2, "Check that you are using argmax to choose the action with the highest value." assert greedy_agent.q_values == [0, 0.5, 1.0, 0, 0], "Check that you are updating q_values correctly." ``` Let's visualize the result. Here we run an experiment using RL-Glue to test our agent. For now, we will set up the experiment code; in future lessons, we will walk you through running experiments so that you can create your own. ``` # Plot Greedy Result num_runs = 200 # The number of times we run the experiment num_steps = 1000 # The number of steps each experiment is run for env = ten_arm_env.Environment # We the environment to use agent = GreedyAgent # We choose what agent we want to use agent_info = {"num_actions": 10} # Pass the agent the information it needs; # here it just needs the number of actions (number of arms). env_info = {} # Pass the environment the information it needs; in this case, it is nothing. all_averages = [] for i in tqdm(range(num_runs)): # tqdm is what creates the progress bar below once the code is run rl_glue = RLGlue(env, agent) # Creates a new RLGlue experiment with the env and agent we chose above rl_glue.rl_init(agent_info, env_info) # Pass RLGlue what it needs to initialize the agent and environment rl_glue.rl_start() # Start the experiment scores = [0] averages = [] for i in range(num_steps): reward, _, action, _ = rl_glue.rl_step() # The environment and agent take a step and return # the reward, and action taken. scores.append(scores[-1] + reward) averages.append(scores[-1] / (i + 1)) all_averages.append(averages) plt.figure(figsize=(15, 5), dpi= 80, facecolor='w', edgecolor='k') plt.plot([1.55 for _ in range(num_steps)], linestyle="--") plt.plot(np.mean(all_averages, axis=0)) plt.legend(["Best Possible", "Greedy"]) plt.title("Average Reward of Greedy Agent") plt.xlabel("Steps") plt.ylabel("Average reward") plt.show() greedy_scores = np.mean(all_averages, axis=0) np.save("greedy_scores", greedy_scores) ``` How did our agent do? Is it possible for it to do better? ## Section 2: Epsilon-Greedy Agent We learned about [another way for an agent to operate](https://www.coursera.org/learn/fundamentals-of-reinforcement-learning/lecture/tHDck/what-is-the-trade-off), where it does not always take the greedy action. Instead, sometimes it takes an exploratory action. It does this so that it can find out what the best action really is. If we always choose what we think is the current best action is, we may miss out on taking the true best action, because we haven't explored enough times to find that best action. Implement an epsilon-greedy agent below. Hint: we are implementing the algorithm from [section 2.4](http://www.incompleteideas.net/book/RLbook2018.pdf#page=52) of the textbook. 
You may want to use your greedy code from above and look at [np.random.random](https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.random.html), as well as [np.random.randint](https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.randint.html), to help you select random actions. ``` # Epsilon Greedy Agent here [Graded] class EpsilonGreedyAgent(main_agent.Agent): def agent_step(self, reward, observation): """ Takes one step for the agent. It takes in a reward and observation and returns the action the agent chooses at that time step. Arguments: reward -- float, the reward the agent received from the environment after taking the last action. observation -- float, the observed state the agent is in. Do not worry about this for this assignment as you will not use it until future lessons. Returns: current_action -- int, the action chosen by the agent at the current time step. """ ### Useful Class Variables ### # self.q_values : An array with the agent’s value estimates for each action. # self.arm_count : An array with a count of the number of times each arm has been pulled. # self.last_action : The action that the agent took on the previous time step. # self.epsilon : The probability an epsilon greedy agent will explore (ranges between 0 and 1) ####################### # Update action-values - this should be the same update as your greedy agent above # (~3-5 lines) ### START CODE HERE ### self.arm_count[self.last_action] += 1 self.q_values[self.last_action] += (reward - self.q_values[self.last_action]) / self.arm_count[self.last_action] ### END CODE HERE ### # Choose action using epsilon greedy # Randomly choose a number between 0 and 1 and see if it is less than self.epsilon # (Hint: look at np.random.random()). If it is, set current_action to a random action. # Otherwise choose current_action greedily as you did above. # (~4 lines) ### START CODE HERE ### if np.random.random() < self.epsilon: current_action = np.random.randint(len(self.q_values)) else: current_action = argmax(self.q_values) ### END CODE HERE ### self.last_action = current_action return current_action # Do not modify this cell # Test Code for Epsilon Greedy Agent e_greedy_agent = EpsilonGreedyAgent() e_greedy_agent.q_values = [0, 0, 1.0, 0, 0] e_greedy_agent.arm_count = [0, 1, 0, 0, 0] e_greedy_agent.num_actions = 5 e_greedy_agent.last_action = 1 e_greedy_agent.epsilon = 0.5 action = e_greedy_agent.agent_step(1, 0) print("Output:") print(e_greedy_agent.q_values) print("Expected Output:") print([0, 0.5, 1.0, 0, 0]) # assert action == 2, "Check that you are using argmax to choose the action with the highest value." assert e_greedy_agent.q_values == [0, 0.5, 1.0, 0, 0], "Check that you are updating q_values correctly." ``` Now that we have our epsilon greedy agent created. Let's compare it against the greedy agent with epsilon of 0.1. ``` # Plot Epsilon greedy results and greedy results num_runs = 200 num_steps = 1000 epsilon = 0.1 agent = EpsilonGreedyAgent env = ten_arm_env.Environment agent_info = {"num_actions": 10, "epsilon": epsilon} env_info = {} all_averages = [] for i in tqdm(range(num_runs)): rl_glue = RLGlue(env, agent) rl_glue.rl_init(agent_info, env_info) rl_glue.rl_start() scores = [0] averages = [] for i in range(num_steps): reward, _, action, _ = rl_glue.rl_step() # The environment and agent take a step and return # the reward, and action taken. 
scores.append(scores[-1] + reward) averages.append(scores[-1] / (i + 1)) all_averages.append(averages) plt.figure(figsize=(15, 5), dpi= 80, facecolor='w', edgecolor='k') plt.plot([1.55 for _ in range(num_steps)], linestyle="--") plt.plot(greedy_scores) plt.title("Average Reward of Greedy Agent vs. Epsilon-Greedy Agent") plt.plot(np.mean(all_averages, axis=0)) plt.legend(("Best Possible", "Greedy", "Epsilon Greedy: Epsilon = 0.1")) plt.xlabel("Steps") plt.ylabel("Average reward") plt.show() np.save("e-greedy", all_averages) ``` Notice how much better the epsilon-greedy agent did. Because we occasionally choose a random action we were able to find a better long term policy. By acting greedily before our value estimates are accurate, we risk settling on a suboptimal action. ## 1.2 Averaging Multiple Runs Did you notice that we averaged over 2000 runs? Why did we do that? To get some insight, let's look at the results of two individual runs by the same agent. ``` # Plot runs of e-greedy agent agent = EpsilonGreedyAgent agent_info = {"num_actions": 10, "epsilon": 0.1} env_info = {} all_averages = [] plt.figure(figsize=(15, 5), dpi= 80, facecolor='w', edgecolor='k') num_steps = 1000 for run in (0, 1): np.random.seed(run) # Here we set the seed so that we can compare two different runs averages = [] rl_glue = RLGlue(env, agent) rl_glue.rl_init(agent_info, env_info) rl_glue.rl_start() scores = [0] for i in range(num_steps): reward, state, action, is_terminal = rl_glue.rl_step() scores.append(scores[-1] + reward) averages.append(scores[-1] / (i + 1)) # all_averages.append(averages) plt.plot(averages) # plt.plot(greedy_scores) plt.title("Comparing two runs of the same agent") plt.xlabel("Steps") plt.ylabel("Average reward") # plt.plot(np.mean(all_averages, axis=0)) # plt.legend(("Greedy", "Epsilon: 0.1")) plt.show() ``` Notice how the two runs were different? But, if this is the exact same algorithm, why does it behave differently in these two runs? The answer is that it is due to randomness in the environment and in the agent. Depending on what action the agent randomly starts with, or when it randomly chooses to explore, it can change the results of the runs. And even if the agent chooses the same action, the reward from the environment is randomly sampled from a Gaussian. The agent could get lucky, and see larger rewards for the best action early on and so settle on the best action faster. Or, it could get unlucky and see smaller rewards for best action early on and so take longer to recognize that it is in fact the best action. To be more concrete, let’s look at how many times an exploratory action is taken, for different seeds. ``` print("Random Seed 1") np.random.seed(1) for _ in range(15): if np.random.random() < 0.1: print("Exploratory Action") print() print() print("Random Seed 2") np.random.seed(2) for _ in range(15): if np.random.random() < 0.1: print("Exploratory Action") ``` With the first seed, we take an exploratory action three times out of 15, but with the second, we only take an exploratory action once. This can significantly affect the performance of our agent because the amount of exploration has changed significantly. To compare algorithms, we therefore report performance averaged across many runs. We do this to ensure that we are not simply reporting a result that is due to stochasticity, as explained [in the lectures](https://www.coursera.org/learn/fundamentals-of-reinforcement-learning/lecture/PtVBs/sequential-decision-making-with-evaluative-feedback). 
Rather, we want statistically significant outcomes. We will not use statistical significance tests in this course. Instead, because we have access to simulators for our experiments, we use the simpler strategy of running for a large number of runs and ensuring that the confidence intervals do not overlap. ## Section 3: Comparing values of epsilon Can we do better than an epsilon of 0.1? Let's try several different values for epsilon and see how they perform. We try different settings of key performance parameters to understand how the agent might perform under different conditions. Below we run an experiment where we sweep over different values for epsilon: ``` # Experiment code for epsilon-greedy with different values of epsilon epsilons = [0.0, 0.01, 0.1, 0.4] plt.figure(figsize=(15, 5), dpi= 80, facecolor='w', edgecolor='k') plt.plot([1.55 for _ in range(num_steps)], linestyle="--") n_q_values = [] n_averages = [] n_best_actions = [] num_runs = 200 for epsilon in epsilons: all_averages = [] for run in tqdm(range(num_runs)): agent = EpsilonGreedyAgent agent_info = {"num_actions": 10, "epsilon": epsilon} env_info = {"random_seed": run} rl_glue = RLGlue(env, agent) rl_glue.rl_init(agent_info, env_info) rl_glue.rl_start() best_arm = np.argmax(rl_glue.environment.arms) scores = [0] averages = [] best_action_chosen = [] for i in range(num_steps): reward, state, action, is_terminal = rl_glue.rl_step() scores.append(scores[-1] + reward) averages.append(scores[-1] / (i + 1)) if action == best_arm: best_action_chosen.append(1) else: best_action_chosen.append(0) if epsilon == 0.1 and run == 0: n_q_values.append(np.copy(rl_glue.agent.q_values)) if epsilon == 0.1: n_averages.append(averages) n_best_actions.append(best_action_chosen) all_averages.append(averages) plt.plot(np.mean(all_averages, axis=0)) plt.legend(["Best Possible"] + epsilons) plt.xlabel("Steps") plt.ylabel("Average reward") plt.show() ``` Why did 0.1 perform better than 0.01? If exploration helps why did 0.4 perform worse that 0.0 (the greedy agent)? Think about these and how you would answer these questions. They are questions in the practice quiz. If you still have questions about it, retake the practice quiz. ## Section 4: The Effect of Step Size In Section 1 of this assignment, we decayed the step size over time based on action-selection counts. The step-size was 1/N(A), where N(A) is the number of times action A was selected. This is the same as computing a sample average. We could also set the step size to be a constant value, such as 0.1. What would be the effect of doing that? And is it better to use a constant or the sample average method? To investigate this question, let’s start by creating a new agent that has a constant step size. This will be nearly identical to the agent created above. You will use the same code to select the epsilon-greedy action. You will change the update to have a constant step size instead of using the 1/N(A) update. ``` # Constant Step Size Agent Here [Graded] # Greedy agent here class EpsilonGreedyAgentConstantStepsize(main_agent.Agent): def agent_step(self, reward, observation): """ Takes one step for the agent. It takes in a reward and observation and returns the action the agent chooses at that time step. Arguments: reward -- float, the reward the agent received from the environment after taking the last action. observation -- float, the observed state the agent is in. Do not worry about this for this assignment as you will not use it until future lessons. 
Returns: current_action -- int, the action chosen by the agent at the current time step. """ ### Useful Class Variables ### # self.q_values : An array with the agent’s value estimates for each action. # self.arm_count : An array with a count of the number of times each arm has been pulled. # self.last_action : The action that the agent took on the previous time step. # self.step_size : A float which is the current step size for the agent. # self.epsilon : The probability an epsilon greedy agent will explore (ranges between 0 and 1) ####################### # Update q_values for action taken at previous time step # using self.step_size intead of using self.arm_count # (~1-2 lines) ### START CODE HERE ### self.arm_count[self.last_action] += 1 self.q_values[self.last_action] += self.step_size * (reward - self.q_values[self.last_action]) ### END CODE HERE ### # Choose action using epsilon greedy. This is the same as you implemented above. # (~4 lines) ### START CODE HERE ### if np.random.random() < self.epsilon: current_action = np.random.randint(len(self.q_values)) else: current_action = argmax(self.q_values) ### END CODE HERE ### self.last_action = current_action return current_action # Do not modify this cell # Test Code for Epsilon Greedy with Different Constant Stepsizes for step_size in [0.01, 0.1, 0.5, 1.0]: e_greedy_agent = EpsilonGreedyAgentConstantStepsize() e_greedy_agent.q_values = [0, 0, 1.0, 0, 0] # e_greedy_agent.arm_count = [0, 1, 0, 0, 0] e_greedy_agent.num_actions = 5 e_greedy_agent.last_action = 1 e_greedy_agent.epsilon = 0.0 e_greedy_agent.step_size = step_size action = e_greedy_agent.agent_step(1, 0) print("Output for step size: {}".format(step_size)) print(e_greedy_agent.q_values) print("Expected Output:") print([0, step_size, 1.0, 0, 0]) assert e_greedy_agent.q_values == [0, step_size, 1.0, 0, 0], "Check that you are updating q_values correctly using the stepsize." 
# Experiment code for different step sizes [graded] step_sizes = [0.01, 0.1, 0.5, 1.0] epsilon = 0.1 num_steps = 1000 num_runs = 200 fig, ax = plt.subplots(figsize=(15, 5), dpi= 80, facecolor='w', edgecolor='k') q_values = {step_size: [] for step_size in step_sizes} true_values = {step_size: None for step_size in step_sizes} best_actions = {step_size: [] for step_size in step_sizes} for step_size in step_sizes: all_averages = [] for run in tqdm(range(num_runs)): agent = EpsilonGreedyAgentConstantStepsize agent_info = {"num_actions": 10, "epsilon": epsilon, "step_size": step_size, "initial_value": 0.0} env_info = {"random_seed": run} rl_glue = RLGlue(env, agent) rl_glue.rl_init(agent_info, env_info) rl_glue.rl_start() best_arm = np.argmax(rl_glue.environment.arms) scores = [0] averages = [] if run == 0: true_values[step_size] = np.copy(rl_glue.environment.arms) best_action_chosen = [] for i in range(num_steps): reward, state, action, is_terminal = rl_glue.rl_step() scores.append(scores[-1] + reward) averages.append(scores[-1] / (i + 1)) if action == best_arm: best_action_chosen.append(1) else: best_action_chosen.append(0) if run == 0: q_values[step_size].append(np.copy(rl_glue.agent.q_values)) best_actions[step_size].append(best_action_chosen) ax.plot(np.mean(best_actions[step_size], axis=0)) if step_size == 0.01: np.save("step_size", best_actions[step_size]) ax.plot(np.mean(n_best_actions, axis=0)) fig.legend(step_sizes + ["1/N(A)"]) plt.title("% Best Action Taken") plt.xlabel("Steps") plt.ylabel("% Best Action Taken") vals = ax.get_yticks() ax.set_yticklabels(['{:,.2%}'.format(x) for x in vals]) plt.show() ``` Notice first that we are now plotting the amount of time that the best action is taken rather than the average reward. To better understand the performance of an agent, it can be useful to measure specific behaviors, beyond just how much reward is accumulated. This measure indicates how close the agent’s behaviour is to optimal. It seems as though 1/N(A) performed better than the others, in that it reaches a solution where it takes the best action most frequently. Now why might this be? Why did a step size of 0.5 start out better but end up performing worse? Why did a step size of 0.01 perform so poorly? Let's dig into this further below. Let’s plot how well each agent tracks the true value, where each agent has a different step size method. You do not have to enter any code here, just follow along. ``` # Plot various step sizes and estimates largest = 0 num_steps = 1000 for step_size in step_sizes: plt.figure(figsize=(15, 5), dpi= 80, facecolor='w', edgecolor='k') largest = np.argmax(true_values[step_size]) plt.plot([true_values[step_size][largest] for _ in range(num_steps)], linestyle="--") plt.title("Step Size: {}".format(step_size)) plt.plot(np.array(q_values[step_size])[:, largest]) plt.legend(["True Expected Value", "Estimated Value"]) plt.xlabel("Steps") plt.ylabel("Value") plt.show() plt.figure(figsize=(15, 5), dpi= 80, facecolor='w', edgecolor='k') plt.title("Step Size: 1/N(A)") plt.plot([true_values[step_size][largest] for _ in range(num_steps)], linestyle="--") plt.plot(np.array(n_q_values)[:, largest]) plt.legend(["True Expected Value", "Estimated Value"]) plt.xlabel("Steps") plt.ylabel("Value") plt.show() ``` These plots help clarify the performance differences between the different step sizes. A step size of 0.01 makes such small updates that the agent’s value estimate of the best action does not get close to the actual value. 
Step sizes of 0.5 and 1.0 both get close to the true value quickly, but are very susceptible to stochasticity in the rewards. The updates overcorrect too much towards recent rewards, and so oscillate around the true value. This means that on many steps, the action that pulls the best arm may seem worse than it actually is. A step size of 0.1 updates fairly quickly to the true value, and does not oscillate as widely around the true values as 0.5 and 1.0. This is one of the reasons that 0.1 performs quite well. Finally we see why 1/N(A) performed well. Early on while the step size is still reasonably high it moves quickly to the true expected value, but as it gets pulled more its step size is reduced which makes it less susceptible to the stochasticity of the rewards. Does this mean that 1/N(A) is always the best? When might it not be? One possible setting where it might not be as effective is in non-stationary problems. You learned about non-stationarity in the lessons. Non-stationarity means that the environment may change over time. This could manifest itself as continual change over time of the environment, or a sudden change in the environment. Let's look at how a sudden change in the reward distributions affects a step size like 1/N(A). This time we will run the environment for 2000 steps, and after 1000 steps we will randomly change the expected value of all of the arms. We compare two agents, both using epsilon-greedy with epsilon = 0.1. One uses a constant step size of 0.1, the other a step size of 1/N(A) that reduces over time. ``` epsilon = 0.1 num_steps = 2000 num_runs = 200 step_size = 0.1 plt.figure(figsize=(15, 5), dpi= 80, facecolor='w', edgecolor='k') plt.plot([1.55 for _ in range(num_steps)], linestyle="--") for agent in [EpsilonGreedyAgent, EpsilonGreedyAgentConstantStepsize]: all_averages = [] for run in tqdm(range(num_runs)): agent_info = {"num_actions": 10, "epsilon": epsilon, "step_size": step_size} env_info = {"random_seed": run} rl_glue = RLGlue(env, agent) rl_glue.rl_init(agent_info, env_info) rl_glue.rl_start() scores = [0] averages = [] for i in range(num_steps): reward, state, action, is_terminal = rl_glue.rl_step() scores.append(scores[-1] + reward) averages.append(scores[-1] / (i + 1)) if i == 1000: rl_glue.environment.arms = np.random.randn(10) all_averages.append(averages) plt.plot(np.mean(all_averages, axis=0)) plt.legend(["Best Possible", "1/N(A)", "0.1"]) plt.xlabel("Steps") plt.ylabel("Average reward") plt.show() ``` Now the agent with a step size of 1/N(A) performed better at the start but then performed worse when the environment changed! What happened? Think about what the step size would be after 1000 steps. Let's say the best action gets chosen 500 times. That means the step size for that action is 1/500 or 0.002. At each step when we update the value of the action and the value is going to move only 0.002 * the error. That is a very tiny adjustment and it will take a long time for it to get to the true value. The agent with step size 0.1, however, will always update in 1/10th of the direction of the error. This means that on average it will take ten steps for it to update its value to the sample mean. These are the types of tradeoffs we have to think about in reinforcement learning. A larger step size moves us more quickly toward the true value, but can make our estimated values oscillate around the expected value. A step size that reduces over time can converge to close to the expected value, without oscillating. 
On the other hand, such a decaying step size is not able to adapt to changes in the environment. Nonstationarity---and the related concept of partial observability---is a common feature of reinforcement learning problems, particularly when learning online. ## Section 5: Conclusion Great work! You have: - Implemented your first agent - Learned about the effect of epsilon, an exploration parameter, on the performance of an agent - Learned about the effect of step size on the performance of the agent - Learned about a good experiment practice of averaging across multiple runs
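For quick reference, the two update rules compared in Section 4 can also be written outside the RL-Glue agent classes. This is a standalone sketch for illustration only, not part of the graded code.

```
def update_sample_average(q, n, action, reward):
    # 1/N(A) update: the step size shrinks as the action is selected more often,
    # so the estimate converges but adapts slowly if the environment changes.
    n[action] += 1
    q[action] += (reward - q[action]) / n[action]

def update_constant_step(q, action, reward, step_size=0.1):
    # Constant step size: recent rewards keep a fixed weight, so the estimate can
    # track a non-stationary target but keeps fluctuating around the true value.
    q[action] += step_size * (reward - q[action])
```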
github_jupyter
# Adau1761_0 IP This notebook serves as a quick demonstration of the audio codec being used in the **PYNQ-Z2 board**. A new IP has been introduced to make use of the codec. Before starting with this notebook please ensure you have the following: * Added the new audio.py file in the board * Added the new pl.py file in the board * Also, a new libsaudio.so is to be added ## How the new IP looks like? This is a screenshot of the addition done to the exsisting base overlay. Instead of the original audio IP block the new one looks like this <p align="center"> <img src ="./sources/IP.JPG" width="100%" height="100%"/> </p> As we can see : * The **adau1761_0** IP is where the main AXI interactions take place. It also conists of a serializer, to serialize the audio going to the headphone jack, and a deserializer, to decode the sound coming from the MIC. * The **axi_dma_0** IP is responsible for streaming audio data to the adau1761_0 through the _Slave AXI-Stream_ Interface of adau1761_0 * Thw **segement_stream_0** is responsible for controlling the _Master AXI_Stream_ Interface of adau1761_0 # Wavgen This is a seprate python function to generate a sine wave and save it as a _.wav_ file. The function description is as follows: ``` audio_write("name_of_the_file.wav", sampling rate, time period, frequency of sine wave) ``` ( Make sure to keep this jupyter nb in the same place where the wavegen.py file is) ``` from wavgen import audio_write audio_write("./output/samples.wav",100,5,44) ``` The waveform being generated: ``` %matplotlib inline import wave import numpy as np import matplotlib import matplotlib.pyplot as plt from scipy.fftpack import fft wav_path = "./output/samples.wav" with wave.open(wav_path, 'r') as wav_file: raw_frames = wav_file.readframes(-1) num_frames = wav_file.getnframes() num_channels = wav_file.getnchannels() sample_rate = wav_file.getframerate() sample_width = wav_file.getsampwidth() temp_buffer = np.empty((num_frames, num_channels, 4), dtype=np.uint8) raw_bytes = np.frombuffer(raw_frames, dtype=np.uint8) temp_buffer[:, :, :sample_width] = raw_bytes.reshape(-1, num_channels, sample_width) temp_buffer[:, :, sample_width:] = \ (temp_buffer[:, :, sample_width-1:sample_width] >> 7) * 255 frames = temp_buffer.view('<i4').reshape(temp_buffer.shape[:-1]) for channel_index in range(num_channels): plt.figure(num=None, figsize=(15, 3)) plt.title('Audio in Time Domain (Channel {})'.format(channel_index)) plt.xlabel('Time in s') plt.ylabel('Amplitude') time_axis = np.arange(0, num_frames/sample_rate, 1/sample_rate) plt.plot(time_axis, frames[:, channel_index]) plt.show() ``` # Initialization ### Create a new audio object ``` from audio import * base=Overlay("./sources/AXIS_audio.bit") Audiobj=base.adau1761_0 ``` ## Bypass audio Users can select either `LINE_IN`, or `HP+MIC` as the input port. In the following example, we choose `LINE_IN`. To choose `MIC`: ```python pAudio.select_microphone() ``` or choose `LINE_IN`: ```python pAudio.select_line_in() ``` ``` Audiobj.select_microphone() ``` ## Load and play Load a sample and play the loaded sample. ``` Audiobj.load("./sources/sine.wav") ``` ## Play function ## Stream Copy the list genrated from the audio file (the load() function generates this) into an array. 
``` buf = Audiobj.buffer ``` Create a continous allocated memory numpy array ``` import pynq.lib.dma from pynq import Xlnk xlnk = Xlnk() dma_send = base.axi_dma_0 cma_ar = xlnk.cma_array(buf.shape, buf.dtype) cma_ar[:] = buf ``` The `playinit()` initializes the various audio codec registers. The numpy array which we declared above is passed onto the **DMA** send channel. ``` async def play_audio(): Audiobj.playinit() dma_send.sendchannel.transfer(cma_ar) await dma_send.sendchannel.wait_async() ``` ## Monitoring the CPU Usage To see how CPU usages is impacted by the audio stream we create another task that prints out the current CPU utilisation every 3 seconds. ``` import psutil import asyncio @asyncio.coroutine def print_cpu_usage(): # Calculate the CPU utilisation by the amount of idle time # each CPU has had in three second intervals last_idle = [c.idle for c in psutil.cpu_times(percpu=True)] while True: yield from asyncio.sleep(3) next_idle = [c.idle for c in psutil.cpu_times(percpu=True)] usage = [(1-(c2-c1)/3) * 100 for c1,c2 in zip(last_idle, next_idle)] print("CPU Usage: {0:3.2f}%, {1:3.2f}%".format(*usage)) last_idle = next_idle audio_task = asyncio.ensure_future(play_audio()) cpu_task = asyncio.ensure_future(print_cpu_usage()) asyncio.get_event_loop().run_until_complete(audio_task) ``` The `playend()` mutes the various audio codec registers which were being used. ``` Audiobj.playend() ``` ### Slave The play() function of the AXI-Slave is not configured properly. Please note. ``` Audiobj.play() ``` ## Record function Records a 5-second sample and is stored in a continous memory allocated array : ### Stream Enter the time for which the recording will take place: ``` seconds = 5 ``` Create a continous allocated memory numpy array ``` import numpy as np import pynq.lib.dma from pynq import Xlnk xlnk = Xlnk() dma_send = base.axi_dma_0 cma_ar = xlnk.cma_array(shape = seconds * 2 * 48000, dtype = "uint32") ``` The segement_stream is responsible for managing the AXI-Stream transactions between the `MIC` (Master AXI Stream) of the audio codec and the PS (Slave Stream). ``` base.segment_stream_0.write(0, seconds * 2 * 48000) ``` After this we have to send the audio array to the DMA ``` Audiobj.recordinit(seconds) dma_send.recvchannel.transfer(cma_ar) dma_send.recvchannel.wait() ``` And then to play it, we will use the DMA again to play from the array: ``` Audiobj.playinit() dma_send.sendchannel.transfer(cma_ar) dma_send.sendchannel.wait() Audiobj.playend() ``` ### Slave This here again is the recording function, but uses the **AXI-Slave** instead of the **AXI-Stream**. ``` Audiobj.record(seconds=5) Audiobj.play() ```
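The record-then-play sequence above can be wrapped in a small helper so it is easy to repeat without re-running each cell by hand. The sketch below only reuses calls that already appear in this notebook; the buffer size of `seconds * 2 * 48000` 32-bit words (two channels at 48 kHz) is taken from the cells above.

```
def record_and_play(audio, dma, seg_stream, xlnk, seconds):
    # Allocate a contiguous buffer for `seconds` of stereo audio at 48 kHz.
    n_words = seconds * 2 * 48000
    buf = xlnk.cma_array(shape=n_words, dtype="uint32")

    # Record: tell the segment_stream IP how many samples to pass through,
    # then stream them from the codec into the buffer via the DMA receive channel.
    seg_stream.write(0, n_words)
    audio.recordinit(seconds)
    dma.recvchannel.transfer(buf)
    dma.recvchannel.wait()

    # Play the captured buffer back out through the DMA send channel.
    audio.playinit()
    dma.sendchannel.transfer(buf)
    dma.sendchannel.wait()
    audio.playend()
    return buf

# e.g. record_and_play(Audiobj, dma_send, base.segment_stream_0, xlnk, 5)
```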
github_jupyter
<!--BOOK_INFORMATION--> <img align="left" style="padding-right:10px;" src="figures/PDSH-cover-small.png"> *This notebook contains an excerpt from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).* *The text is released under the [CC-BY-NC-ND license](https://creativecommons.org/licenses/by-nc-nd/3.0/us/legalcode), and code is released under the [MIT license](https://opensource.org/licenses/MIT). If you find this content useful, please consider supporting the work by [buying the book](http://shop.oreilly.com/product/0636920034919.do)!* <!--NAVIGATION--> < [IPython Magic Commands](01.03-Magic-Commands.ipynb) | [Contents](Index.ipynb) | [IPython and Shell Commands](01.05-IPython-And-Shell-Commands.ipynb) > <a href="https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/01.04-Input-Output-History.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a> # Input and Output History Previously we saw that the IPython shell allows you to access previous commands with the up and down arrow keys, or equivalently the Ctrl-p/Ctrl-n shortcuts. Additionally, in both the shell and the notebook, IPython exposes several ways to obtain the output of previous commands, as well as string versions of the commands themselves. We'll explore those here. ## IPython's ``In`` and ``Out`` Objects By now I imagine you're quite familiar with the ``In [1]:``/``Out[1]:`` style prompts used by IPython. But it turns out that these are not just pretty decoration: they give a clue as to how you can access previous inputs and outputs in your current session. Imagine you start a session that looks like this: ```ipython In [1]: import math In [2]: math.sin(2) Out[2]: 0.9092974268256817 In [3]: math.cos(2) Out[3]: -0.4161468365471424 ``` We've imported the built-in ``math`` package, then computed the sine and the cosine of the number 2. These inputs and outputs are displayed in the shell with ``In``/``Out`` labels, but there's more–IPython actually creates some Python variables called ``In`` and ``Out`` that are automatically updated to reflect this history: ```ipython In [4]: print(In) ['', 'import math', 'math.sin(2)', 'math.cos(2)', 'print(In)'] In [5]: Out Out[5]: {2: 0.9092974268256817, 3: -0.4161468365471424} ``` The ``In`` object is a list, which keeps track of the commands in order (the first item in the list is a place-holder so that ``In[1]`` can refer to the first command): ```ipython In [6]: print(In[1]) import math ``` The ``Out`` object is not a list but a dictionary mapping input numbers to their outputs (if any): ```ipython In [7]: print(Out[2]) 0.9092974268256817 ``` Note that not all operations have outputs: for example, ``import`` statements and ``print`` statements don't affect the output. The latter may be surprising, but makes sense if you consider that ``print`` is a function that returns ``None``; for brevity, any command that returns ``None`` is not added to ``Out``. Where this can be useful is if you want to interact with past results. For example, let's check the sum of ``sin(2) ** 2`` and ``cos(2) ** 2`` using the previously-computed results: ```ipython In [8]: Out[2] ** 2 + Out[3] ** 2 Out[8]: 1.0 ``` The result is ``1.0`` as we'd expect from the well-known trigonometric identity. 
In this case, using these previous results probably is not necessary, but it can become very handy if you execute a very expensive computation and want to reuse the result! ## Underscore Shortcuts and Previous Outputs The standard Python shell contains just one simple shortcut for accessing previous output; the variable ``_`` (i.e., a single underscore) is kept updated with the previous output; this works in IPython as well: ```ipython In [9]: print(_) 1.0 ``` But IPython takes this a bit further—you can use a double underscore to access the second-to-last output, and a triple underscore to access the third-to-last output (skipping any commands with no output): ```ipython In [10]: print(__) -0.4161468365471424 In [11]: print(___) 0.9092974268256817 ``` IPython stops there: more than three underscores starts to get a bit hard to count, and at that point it's easier to refer to the output by line number. There is one more shortcut we should mention, however–a shorthand for ``Out[X]`` is ``_X`` (i.e., a single underscore followed by the line number): ```ipython In [12]: Out[2] Out[12]: 0.9092974268256817 In [13]: _2 Out[13]: 0.9092974268256817 ``` ## Suppressing Output Sometimes you might wish to suppress the output of a statement (this is perhaps most common with the plotting commands that we'll explore in [Introduction to Matplotlib](04.00-Introduction-To-Matplotlib.ipynb)). Or maybe the command you're executing produces a result that you'd prefer not like to store in your output history, perhaps so that it can be deallocated when other references are removed. The easiest way to suppress the output of a command is to add a semicolon to the end of the line: ```ipython In [14]: math.sin(2) + math.cos(2); ``` Note that the result is computed silently, and the output is neither displayed on the screen or stored in the ``Out`` dictionary: ```ipython In [15]: 14 in Out Out[15]: False ``` ## Related Magic Commands For accessing a batch of previous inputs at once, the ``%history`` magic command is very helpful. Here is how you can print the first four inputs: ```ipython In [16]: %history -n 1-4 1: import math 2: math.sin(2) 3: math.cos(2) 4: print(In) ``` As usual, you can type ``%history?`` for more information and a description of options available. Other similar magic commands are ``%rerun`` (which will re-execute some portion of the command history) and ``%save`` (which saves some set of the command history to a file). For more information, I suggest exploring these using the ``?`` help functionality discussed in [Help and Documentation in IPython](01.01-Help-And-Documentation.ipynb). <!--NAVIGATION--> < [IPython Magic Commands](01.03-Magic-Commands.ipynb) | [Contents](Index.ipynb) | [IPython and Shell Commands](01.05-IPython-And-Shell-Commands.ipynb) > <a href="https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/01.04-Input-Output-History.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a>
github_jupyter
<a href="https://colab.research.google.com/github/butchland/fastai_nb_explorations/blob/master/CollectRealFingersData.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Data Exploration notebooks for Fingers datasets ### Run using a CPU Runtime (no GPU needed) ## Environment setup ``` !curl https://course.fast.ai/setup/colab | bash !pip install fastai2 --upgrade !pip install fastcore --upgrade !pip install nbdev --upgrade from google.colab import drive drive.mount('/content/drive') from fastai2.vision.all import * escdrive = lambda x : x.as_posix().replace(' ','\ ') gdrive = Path('/content/drive/My Drive/fastai_v4') config = Config() data_path = config.d['data_path'] archive_path = config.d['archive_path'] model_path = config.d['model_path'] (data_path, archive_path,model_path) ``` ## Collect Realworld Pics as Test Data ``` from IPython.display import display, Javascript from google.colab.output import eval_js from base64 import b64decode def take_photo(filename='photo.jpg', quality=0.8): js = Javascript(''' async function takePhoto(quality) { const div = document.createElement('div'); const capture = document.createElement('button'); capture.textContent = 'Capture'; div.appendChild(capture); const video = document.createElement('video'); video.style.display = 'block'; const stream = await navigator.mediaDevices.getUserMedia({video: true}); document.body.appendChild(div); div.appendChild(video); video.srcObject = stream; await video.play(); // Resize the output to fit the video element. google.colab.output.setIframeHeight(document.documentElement.scrollHeight, true); // Wait for Capture to be clicked. await new Promise((resolve) => capture.onclick = resolve); const canvas = document.createElement('canvas'); canvas.width = video.videoWidth; canvas.height = video.videoHeight; canvas.getContext('2d').drawImage(video, 0, 0); stream.getVideoTracks()[0].stop(); div.remove(); return canvas.toDataURL('image/jpeg', quality); } ''') display(js) data = eval_js('takePhoto({})'.format(quality)) binary = b64decode(data.split(',')[1]) with open(filename, 'wb') as f: f.write(binary) return filename from IPython.display import Image try: filename = take_photo() print('Saved to {}'.format(filename)) # Show the image which was just taken. display(Image(filename)) except Exception as err: # Errors will be thrown if the user does not have a webcam or if they do not # grant the page permission to access it. print(str(err)) !mv photo.jpg test1_1R.jpg !mkdir -p data/fingers/testset/orig !mv *.jpg data/fingers/testset/orig dataset = 'testset' path = Path(data_path)/'fingers' ``` ## Backup realworld testset to GDrive ``` !tar -czf {(Path(data_path)/(dataset + '.tgz')).as_posix()} -C {Path(data_path).as_posix()} {path.as_posix()} !cp data/testset.tgz {escdrive(gdrive/'data'/'fingers_testset.tgz')} ```
github_jupyter
``` import random import pandas as pd import geopandas as gpd import matplotlib.pyplot as plt import esda import libpysal.weights as weights from esda.moran import Moran from shapely.geometry import Point, MultiPoint, LineString, Polygon, shape import json import pylab import libpysal import numpy as np from sklearn.metrics.cluster import adjusted_rand_score from sklearn.metrics import f1_score from pyclustering.cluster.cure import cure from pyclustering.cluster.kmeans import kmeans from pyclustering.cluster.center_initializer import kmeans_plusplus_initializer from sklearn import preprocessing def permutation(lst): if len(lst) == 0: return [] if len(lst) == 1: return [lst] l = [] for i in range(len(lst)): m = lst[i] remLst = lst[:i] + lst[i+1:] for p in permutation(remLst): l.append([m] + p) return l def get_f1_score(df, permut): def match_clus(x, permut): if x == 0: return int(permut[0]) elif x == 1: return int(permut[1]) else: return x df["group_match"] = df["group"].apply(lambda x: match_clus(x, permut)) return df, f1_score(df.group_match.values, df.clus_group_gt.values, average='macro') def get_max_f1_score(df): max_f1 = 0 max_p = [] for p in permutation([3,4]): df, f1 = get_f1_score(df, p) if max_f1 < f1: max_f1 = f1 max_p = p print("f1_score ", max_f1, max_p) def cal_joint_statistic(nyc_data, w_voronoi): matched_connects = 0 all_neighbors_connects = 0 for obj_id, neighbors in w_voronoi.neighbors.items(): obj_clus = nyc_data.iat[obj_id, -1] for nei in neighbors: nei_clus = nyc_data.iat[nei, -1] all_neighbors_connects += 1 if obj_clus == nei_clus: matched_connects += 1 return matched_connects / all_neighbors_connects ``` # Processing NYC Check-in Data ``` nyc_check_in = gpd.read_file('data/nyc_checkin.shp') nyc_check_in.head(1) nyc_check_in.groupby("venueCateg").count().sort_values("venueId").tail(1) venueCateg_list = ["Office", "Home (private)"] venueId_list = pd.DataFrame(nyc_check_in.venueId.unique()).sample(frac=0.5).values.squeeze() nyc_check_sticc = nyc_check_in[(nyc_check_in.venueCateg.isin(venueCateg_list))&(nyc_check_in.venueId.isin(venueId_list))] print(nyc_check_sticc.shape) nyc_check_sticc.head(1) def return_week(x): if x == "Mon": return 1 elif x == "Tue": return 2 elif x == "Wed": return 3 elif x == "Thu": return 4 elif x == "Fri": return 5 elif x == "Sat": return 6 elif x == "Sun": return 7 def return_category(x): if x == "Gym": return 1 elif x == "Coffee Shop": return 2 elif x == "Office": return 3 elif x == "Home (private)": return 4 elif x == "Subway": return 5 nyc_check_sticc["week_attr"] = nyc_check_sticc["week"].apply(lambda x: return_week(x)) nyc_check_sticc["category"] = nyc_check_sticc["venueCateg"].apply(lambda x: return_category(x)) nyc_check_sticc = nyc_check_sticc.reset_index().drop("index", axis=1) nyc_check_sticc.head(1) kd = libpysal.cg.KDTree(np.array(nyc_check_sticc[["latitude", "longitude"]].values)) wnn = libpysal.weights.KNN(kd, 3) nearest_pt = pd.DataFrame().from_dict(wnn.neighbors, orient="index") for i in range(nearest_pt.shape[1]): nearest_pt = nearest_pt.rename({i:f"n_pt_{i}"}, axis=1) nearest_pt.head(1) nyc_check_sticc = nyc_check_sticc.join(nearest_pt) nyc_check_sticc.head(1) nyc_check_sticc[["week_attr", "hour", "n_pt_0", "n_pt_1", "n_pt_2"]].to_csv(r'nyc_checkin.txt', header=None, index=True, sep=',') w_voronoi = weights.Voronoi.from_dataframe(nyc_check_sticc) ``` # STICC ``` !python STICC_main.py --fname=nyc_checkin.txt --oname=result_nyc_checkin.txt --attr_idx_start=1 \ --attr_idx_end=2 --spatial_idx_start=3 --spatial_idx_end=5 \ 
--spatial_radius 4 --number_of_clusters 2 --lambda_parameter 10e-2 --beta 5 --maxIters 20 group = pd.read_table('result_nyc_checkin.txt', names=["group"]) result_nyc_check_sticc = nyc_check_sticc.join(group) result_nyc_check_sticc = result_nyc_check_sticc.rename({"category": "clus_group_gt"}, axis=1) print("Adjusted rand score", adjusted_rand_score(result_nyc_check_sticc["group"].values, result_nyc_check_sticc.clus_group_gt.values)) sp_contiguity = cal_joint_statistic(result_nyc_check_sticc, w_voronoi) print("Spatial contiguity: ", sp_contiguity) get_max_f1_score(result_nyc_check_sticc) ``` # Other methods ``` def get_pycluster_result(ground_truth, cluster_method): data = ground_truth[["week_attr", "hour"]].values # For K-Means data = ground_truth[["week_attr", "hour", "latitude", "longitude"]].values # For Sp K-Means if cluster_method == kmeans: initial_centers = kmeans_plusplus_initializer(data.tolist(), 2).initialize() instance = cluster_method(data.tolist(), initial_centers) elif cluster_method == cure: print("cure") instance = cure(data, 2) else: instance = cluster_method(data.tolist(), 2) instance.process() clusters = instance.get_clusters() clusters_result = [] for i, clus in enumerate(clusters): for data in clus: clusters_result.append([data, i]) clusters_result_df = pd.DataFrame(clusters_result, columns=["pt", "group"]).sort_values("pt").set_index("pt") return clusters_result_df ``` # K-Means ``` group = get_pycluster_result(nyc_check_sticc, kmeans) result_nyc_check_sticc = nyc_check_sticc.join(group) result_nyc_check_sticc = result_nyc_check_sticc.rename({"category": "clus_group_gt"}, axis=1) print("Adjusted rand score", adjusted_rand_score(result_nyc_check_sticc["group"].values, result_nyc_check_sticc.clus_group_gt.values)) sp_contiguity = cal_joint_statistic(result_nyc_check_sticc, w_voronoi) print("Spatial contiguity: ", sp_contiguity) get_max_f1_score(result_nyc_check_sticc) ``` # Sp K-Means ``` group = get_pycluster_result(nyc_check_sticc, kmeans) result_nyc_check_sticc = nyc_check_sticc.join(group) result_nyc_check_sticc = result_nyc_check_sticc.rename({"category": "clus_group_gt"}, axis=1) print("Adjusted rand score", adjusted_rand_score(result_nyc_check_sticc["group"].values, result_nyc_check_sticc.clus_group_gt.values)) sp_contiguity = cal_joint_statistic(result_nyc_check_sticc, w_voronoi) print("Spatial contiguity: ", sp_contiguity) get_max_f1_score(result_nyc_check_sticc) nyc_check_sticc.head(1) ``` # CURE ``` group = get_pycluster_result(nyc_check_sticc, cure) result_nyc_check_sticc = nyc_check_sticc.join(group) result_nyc_check_sticc = result_nyc_check_sticc.rename({"category": "clus_group_gt"}, axis=1) print("Adjusted rand score", adjusted_rand_score(result_nyc_check_sticc["group"].values, result_nyc_check_sticc.clus_group_gt.values)) sp_contiguity = cal_joint_statistic(result_nyc_check_sticc, w_voronoi) print("Spatial contiguity: ", sp_contiguity) get_max_f1_score(result_nyc_check_sticc) ``` # GMM ``` from sklearn.mixture import GaussianMixture gmm_data = nyc_check_sticc.copy() gmm_data.head(1) X = gmm_data[['hour', 'week_attr']].values gm = GaussianMixture(n_components=2).fit(X) gmm = pd.DataFrame(gm.predict(X), columns=["group"]) gmm.head(1) result_nyc_check_sticc = nyc_check_sticc.join(gmm) result_nyc_check_sticc = result_nyc_check_sticc.rename({"category": "clus_group_gt"}, axis=1) print("Adjusted rand score", adjusted_rand_score(result_nyc_check_sticc["group"].values, result_nyc_check_sticc.clus_group_gt.values)) sp_contiguity = 
cal_joint_statistic(result_nyc_check_sticc, w_voronoi) print("Spatial contiguity: ", sp_contiguity) get_max_f1_score(result_nyc_check_sticc) ```
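Across all of the comparisons above, the spatial contiguity measure is simply the fraction of neighbour pairs that end up with the same cluster label. The toy sketch below illustrates this with made-up data and a hand-built neighbour structure instead of a Voronoi diagram; it assumes the `cal_joint_statistic` helper defined at the top of this notebook is in scope.

```
import pandas as pd
from libpysal import weights

# Four points on a line, each linked to its immediate neighbours: 0-1-2-3
toy_w = weights.W({0: [1], 1: [0, 2], 2: [1, 3], 3: [2]})

# cal_joint_statistic reads the cluster label from the last column
toy_df = pd.DataFrame({"value": [0.1, 0.2, 0.8, 0.9],
                       "group": [0, 0, 1, 1]})

# Only the 1-2 link crosses clusters, so 4 of the 6 directed links match: ~0.67
print(cal_joint_statistic(toy_df, toy_w))
```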
github_jupyter
# Training of the model for Thumb Classification The goal of this notebook is to train a classification model that can detect thumb up and thumb down in video stream This notebook has been run on Google Colab to take advantage of the GPU. ``` import numpy as np import os import shutil from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report, confusion_matrix import tensorflow as tf import keras from keras.preprocessing.image import ImageDataGenerator from keras import backend as K from keras.applications.vgg16 import VGG16 from keras.applications.vgg16 import preprocess_input as preprocess_input_vgg from keras.layers import Dense, Dropout, Flatten from keras.models import Model from keras import optimizers from keras.models import load_model import seaborn as sns ``` Define the three classification category ``` NB_CLASSES = 3 THUMB_UP = '01' THUMB_DOWN = '02' OTHER = '03' PATH_TO_ZIP = '/content/DSTI_Python_Labs/assets/data_train' ZIP_FILE = 'thumbv3.zip' IMAGES_RAW = 'images_raw_thumb' IMAGES_SPLITED = 'images_keras_thumb' MODEL_FILE = 'model_thumb_v24112020.h5' SAVE_FOLDER = 'save_model_thumb' ``` ## Get the training data from the github repository ``` !git clone https://github.com/EricKiennemann/DSTI_Python_Labs.git cd $PATH_TO_ZIP !mkdir $IMAGES_RAW ``` **Unzip the file training data file locally locally** ``` import zipfile with zipfile.ZipFile(ZIP_FILE, 'r') as zip_ref: zip_ref.extractall(IMAGES_RAW) !ls $IMAGES_RAW ``` ## Prepare the files for the processing Split the files into three datasets (folders) : * "train" for training * "valid" for validation * "test" for test ``` def TrainValidTestFruit(category): # Path to the directory where the original dataset was uncompressed original_dataset_dir = IMAGES_RAW # Directory where the three datasets will be stored base_dir = IMAGES_SPLITED os.mkdir(base_dir) # Directory for the training splits train_dir = os.path.join(base_dir, 'train') os.mkdir(train_dir) # Directory for the validation splits valid_dir = os.path.join(base_dir, 'valid') os.mkdir(valid_dir) # Directory for the test splits test_dir = os.path.join(base_dir, 'test') os.mkdir(test_dir) for cat in category: # Directories for training categories train_category_dir = os.path.join(train_dir, cat) os.mkdir(train_category_dir) # Directories for validation categories valid_category_dir = os.path.join(valid_dir, cat) os.mkdir(valid_category_dir) # Directories for test categories test_category_dir = os.path.join(test_dir, cat) os.mkdir(test_category_dir) data_folder = os.path.join(original_dataset_dir, cat) jpgfiles = os.listdir(data_folder) nb_images = len(jpgfiles) train_ratio = 0.75 # 75% of files for training validation_ratio = 0.15 # 15% of files for validation test_ratio = 0.10 # 10% of files for test dataX = np.arange(nb_images) # train is now 75% of the entire data set x_train, x_test = train_test_split(dataX, test_size=1 - train_ratio) # test is now 10% of the initial data set # validation is now 15% of the initial data set x_valid, x_test = train_test_split(x_test, test_size=test_ratio/(test_ratio + validation_ratio)) # Copy the train files fnames = [jpgfiles[i] for i in x_train] for fname in fnames: src = os.path.join(original_dataset_dir, cat, fname) dst = os.path.join(train_category_dir, fname) shutil.copyfile(src, dst) # Copy the validation files fnames = [jpgfiles[i] for i in x_valid] for fname in fnames: src = os.path.join(original_dataset_dir, cat, fname) dst = os.path.join(valid_category_dir, fname) shutil.copyfile(src, 
dst) # Copy the test files fnames = [jpgfiles[i] for i in x_test] for fname in fnames: src = os.path.join(original_dataset_dir, cat, fname) dst = os.path.join(test_category_dir, fname) shutil.copyfile(src, dst) # Sanity Check to ensure that Training, Validation and Test Folders have the expected number of images print('Number of Images in Training Directory is {} for category {}'.format(len(os.listdir(train_category_dir)),cat)) print('Number of Images in Validation Directory is {} for category {}'.format(len(os.listdir(valid_category_dir)),cat)) print('Number of Images in Test Directory is {} for category {}'.format(len(os.listdir(test_category_dir)),cat)) # Run the creation of the three datasets on our three labels TrainValidTestFruit([THUMB_UP,THUMB_DOWN,OTHER]) ``` The dataset is quit well balanced between 'thumb up' 517 images and 'thumb down' 593 images for training ## Building the Neural Network We'll be using VGG16 model and the corresponding preprocessing function for the input images. ``` # include_top=false => we only take the convolutional part not the classification part. # The image standard size is (224,224) base_model = VGG16(include_top=False, weights='imagenet', input_shape = (224,224,3)) base_model.summary() ``` Note that we have downloaded only a convolution part of the neural network. Let's add some dense layers on top of it. I choose a sigmoid activation in order to be able to dect more easelly when there is "nothing" in the screen. If the probability for both 'thumb up' and 'thumb down' are low it is likely that there is no thumb on the screen ``` flatten = Flatten()(base_model.output) dropout_1 = Dropout(0.25)(flatten) fc_1 = Dense(128)(dropout_1) dropout_2 = Dropout(0.5)(fc_1) predictions = Dense(NB_CLASSES, activation="sigmoid", name='predictions')(dropout_2) model = Model(base_model.input, predictions) ``` **The final model structure** ``` model.summary() ``` **Chosing the optimizer parameters and compiling the model** Categorical crossentropy is choosen for this multi label classification problem ``` loss = 'categorical_crossentropy' learning_rate = 0.001 optimizer = optimizers.SGD ## optimizers.SGD metrics = ['accuracy'] model.compile(loss=loss, optimizer=optimizer(learning_rate), metrics=metrics) ``` ## Data preparation We will do data augmentation in order to have more data for the training. 
We apply : * rotation * width shift * height shift ``` train_dir = os.path.join(IMAGES_SPLITED, "train") val_dir = os.path.join(IMAGES_SPLITED, "valid") test_dir = os.path.join(IMAGES_SPLITED, "test") # we'll resize images in correspondance to network input size image_size = (224,224) # apply some data augmentation # train_datagen = ImageDataGenerator(rotation_range=15, width_shift_range=0.2, height_shift_range=0.2, fill_mode='nearest', preprocessing_function=preprocess_input_vgg ) validation_datagen = ImageDataGenerator(preprocessing_function=preprocess_input_vgg) # for validation we don't need to augment train_batchsize = 40 val_batchsize = 40 # this function takes images from folders and feeds to Imagedatagenerator train_generator = train_datagen.flow_from_directory( train_dir, target_size=image_size, batch_size=train_batchsize, class_mode='categorical') validation_generator = validation_datagen.flow_from_directory( val_dir, target_size=image_size, batch_size=val_batchsize, class_mode='categorical', shuffle=False) ``` **The data generation is only applied to the train dataset** We do have 1370 images for training (without data augmentation) and 273 images for validation ## Model training Starting with a number of epoch equal to 100 ``` epochs = 60 nb_train_steps = train_generator.samples // train_generator.batch_size nb_val_steps = validation_generator.samples // validation_generator.batch_size history = model.fit_generator( train_generator, steps_per_epoch=nb_train_steps, epochs=epochs, validation_data=validation_generator, validation_steps=nb_val_steps, verbose=1, #0 ) ``` **The accuracy for training and validation dataset are good** ``` print('training acc.:',history.history['accuracy'][-1]) print('val acc.:', (history.history['val_accuracy'])[-1]) import matplotlib.pyplot as plt %matplotlib inline def plot_history(history): plt.figure(figsize=(12,6)) plt.subplot(1,2,1) plt.xlabel('Epoch') plt.ylabel('Accuracy %') plt.plot(history.epoch, np.array(history.history['accuracy']), label='Train Accuracy') plt.plot(history.epoch, np.array(history.history['val_accuracy']), label = 'Val Accuracy') plt.legend() plt.title('Accuracy for train and validation') plt.ylim([0, 1.1]) plt.subplot(1,2,2) plt.xlabel('Epoch') plt.ylabel('loss') plt.plot(history.epoch, np.array(history.history['loss']), label='Train Loss') plt.plot(history.epoch, np.array(history.history['val_loss']), label = 'Validation Loss') plt.legend() plt.title('Loss for train and validation') plt.show() plot_history(history) ``` ## Saving model The model is saved to be used in the back end part of the web application ``` os.makedirs(SAVE_FOLDER, exist_ok=True) model_path = os.path.join(SAVE_FOLDER, MODEL_FILE) model.save(model_path) ``` ## Final test on the test Dataset ``` model = load_model(model_path) ``` Apply the same preprocessing on the images as for validation dataset ``` test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input_vgg) ``` Realize the prediction ``` test_generator = test_datagen.flow_from_directory( test_dir, target_size=image_size, shuffle = False, class_mode='categorical', batch_size=1) filenames = test_generator.filenames nb_samples = len(filenames) predict = model.predict(test_generator,steps=nb_samples) ``` The prediction has been done for 185 images **See the result of the prediction** ``` def show_classification_confusion_matrix(y_valid,y_fit,list_classes): print(classification_report(y_valid, y_fit, target_names = list_classes)) mat = confusion_matrix(y_valid, y_fit) sns.heatmap(mat.T, 
square=True, annot=True, fmt='d', cbar=False, xticklabels=list_classes, yticklabels=list_classes) plt.xlabel('true label') plt.ylabel('predicted label') # choose the higher probability as the best prediction y_pred = np.argmax(predict, axis=1) classes = ["{:02d}".format(i) for i in range(1, 4)] show_classification_confusion_matrix(test_generator.classes,y_pred,classes) ``` **All the images have been correctly predicted** **The model is kept and will be used for the web application** ## Store the model file on a Google account ``` from google.colab import drive drive.mount('/content/gdrive') %cp $model_path ../../../gdrive/'My Drive' ```
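For completeness, here is a small inference sketch that is not part of the original notebook: it reloads the saved model and classifies a single frame, applying the idea described earlier that low sigmoid scores for both thumb classes mean no thumb is visible. The image file name and the 0.5 threshold are placeholders to be tuned on real frames.

```
import numpy as np
from keras.models import load_model
from keras.preprocessing.image import load_img, img_to_array
from keras.applications.vgg16 import preprocess_input as preprocess_input_vgg

# Reload the model saved above (model_path was defined earlier in this notebook)
model = load_model(model_path)

# "example_frame.jpg" is a hypothetical file, e.g. one frame grabbed from the webcam
img = load_img("example_frame.jpg", target_size=(224, 224))
x = preprocess_input_vgg(img_to_array(img))
scores = model.predict(np.expand_dims(x, axis=0))[0]  # order: [thumb up, thumb down, other]

threshold = 0.5  # assumed cut-off, to be tuned
if max(scores[0], scores[1]) < threshold:
    print("No thumb detected")
else:
    print("Thumb up" if scores[0] > scores[1] else "Thumb down")
```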
github_jupyter
# Neuromorphic Computing Course ## 0. Example Code ### Download the program and move it up one directory. ``` # Delete everything in the content (current) directory on google colab !rm -rf /content/* || echo rm -rf /content/* failed # Clone git repo, change the branch and move it up by one level in the folder hierarchy !git clone https://gitlab.socsci.ru.nl/snnsimulator/simsnn.git !mv ./simsnn ./simsnnn !mv ./simsnnn/* ./ !rm -rf simsnnn || echo rm -rf simsnnn failed ``` ### Creating a programmed neuron. ``` from simsnn.core.networks import Network from simsnn.core.simulators import Simulator # Create the network and the simulator object net = Network() sim = Simulator(net) # Create a programmed neuron, that spikes on times 1 and 3, # does not repeat it's programming and has the ID "pn". programmed_neuron = net.createInputTrain(train=[0,1,0,1], loop=False, ID="pn") # Add all neurons to the raster sim.raster.addTarget(programmed_neuron) # Add all neurons to the multimeter sim.multimeter.addTarget(programmed_neuron) # Run the simulation for 10 rounds, enable the plotting of the raster, # the multimeter and the network structure. sim.run(steps=10, plotting=True) ``` Do you understand what is going on? ### Connecting two neurons with a synapse. ``` from simsnn.core.networks import Network from simsnn.core.simulators import Simulator net = Network() sim = Simulator(net) programmed_neuron = net.createInputTrain(train=[0,1,0,1], loop=False, ID="pn") # Create a LIF neuron, with a membrane voltage threshold of 1, # a post spike reset value of 0 and no voltage decay (m=1). lif_neuron = net.createLIF(ID="ln", thr=1, V_reset=0, m=1) # Create a Synapse, between the programmed neuron and the LIF neuron, # with a voltage weight of 1 and a delay of 1. net.createSynapse(pre=programmed_neuron, post=lif_neuron, ID="pn-ln", w=1, d=1) sim.raster.addTarget([programmed_neuron, lif_neuron]) sim.multimeter.addTarget([programmed_neuron, lif_neuron]) sim.run(steps=10, plotting=True) ``` Note how the LIF neuron does not ever seem to get any voltage. This is just an artifact of the timing of the voltage measurement. The voltages are measured at the end of every discrete timestep. When a LIF neuron spikes, its voltage will be reset to the V_reset value, which is 0 in this case. ### Creating an endlessly spiking neuron ``` from simsnn.core.networks import Network from simsnn.core.simulators import Simulator net = Network() sim = Simulator(net) # Create a neuron that has threshold of 4, a post spike reset value of 0, # no voltage decay and a constant input current of 1 lif_neuron = net.createLIF(ID="ln", thr=4, V_reset=0, m=1, I_e=1) sim.raster.addTarget([lif_neuron]) sim.multimeter.addTarget([lif_neuron]) sim.run(steps=10, plotting=True) ```
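As a small follow-up sketch, not part of the course material above, the same API can chain the constant-current neuron into a second LIF neuron, so that the downstream neuron fires roughly half as often as the upstream one. The parameter choices are illustrative only.

```
from simsnn.core.networks import Network
from simsnn.core.simulators import Simulator

net = Network()
sim = Simulator(net)

# Constant input current of 1 with threshold 4: fires roughly every four steps
driver = net.createLIF(ID="driver", thr=4, V_reset=0, m=1, I_e=1)

# Needs two incoming weight-1 spikes before reaching its threshold of 2
divider = net.createLIF(ID="divider", thr=2, V_reset=0, m=1)

net.createSynapse(pre=driver, post=divider, ID="driver-divider", w=1, d=1)

sim.raster.addTarget([driver, divider])
sim.multimeter.addTarget([driver, divider])
sim.run(steps=20, plotting=True)
```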
github_jupyter
# RadarCOVID-Report ## Data Extraction ``` import datetime import json import logging import os import shutil import tempfile import textwrap import uuid import matplotlib.pyplot as plt import matplotlib.ticker import numpy as np import pandas as pd import pycountry import retry import seaborn as sns %matplotlib inline current_working_directory = os.environ.get("PWD") if current_working_directory: os.chdir(current_working_directory) sns.set() matplotlib.rcParams["figure.figsize"] = (15, 6) extraction_datetime = datetime.datetime.utcnow() extraction_date = extraction_datetime.strftime("%Y-%m-%d") extraction_previous_datetime = extraction_datetime - datetime.timedelta(days=1) extraction_previous_date = extraction_previous_datetime.strftime("%Y-%m-%d") extraction_date_with_hour = datetime.datetime.utcnow().strftime("%Y-%m-%d@%H") current_hour = datetime.datetime.utcnow().hour are_today_results_partial = current_hour != 23 ``` ### Constants ``` from Modules.ExposureNotification import exposure_notification_io spain_region_country_code = "ES" germany_region_country_code = "DE" default_backend_identifier = spain_region_country_code backend_generation_days = 7 * 2 daily_summary_days = 7 * 4 * 3 daily_plot_days = 7 * 4 tek_dumps_load_limit = daily_summary_days + 1 ``` ### Parameters ``` environment_backend_identifier = os.environ.get("RADARCOVID_REPORT__BACKEND_IDENTIFIER") if environment_backend_identifier: report_backend_identifier = environment_backend_identifier else: report_backend_identifier = default_backend_identifier report_backend_identifier environment_enable_multi_backend_download = \ os.environ.get("RADARCOVID_REPORT__ENABLE_MULTI_BACKEND_DOWNLOAD") if environment_enable_multi_backend_download: report_backend_identifiers = None else: report_backend_identifiers = [report_backend_identifier] report_backend_identifiers environment_invalid_shared_diagnoses_dates = \ os.environ.get("RADARCOVID_REPORT__INVALID_SHARED_DIAGNOSES_DATES") if environment_invalid_shared_diagnoses_dates: invalid_shared_diagnoses_dates = environment_invalid_shared_diagnoses_dates.split(",") else: invalid_shared_diagnoses_dates = [] invalid_shared_diagnoses_dates ``` ### COVID-19 Cases ``` report_backend_client = \ exposure_notification_io.get_backend_client_with_identifier( backend_identifier=report_backend_identifier) @retry.retry(tries=10, delay=10, backoff=1.1, jitter=(0, 10)) def download_cases_dataframe(): return pd.read_csv("https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv") confirmed_df_ = download_cases_dataframe() confirmed_df_.iloc[0] confirmed_df = confirmed_df_.copy() confirmed_df = confirmed_df[["date", "new_cases", "iso_code"]] confirmed_df.rename( columns={ "date": "sample_date", "iso_code": "country_code", }, inplace=True) def convert_iso_alpha_3_to_alpha_2(x): try: return pycountry.countries.get(alpha_3=x).alpha_2 except Exception as e: logging.info(f"Error converting country ISO Alpha 3 code '{x}': {repr(e)}") return None confirmed_df["country_code"] = confirmed_df.country_code.apply(convert_iso_alpha_3_to_alpha_2) confirmed_df.dropna(inplace=True) confirmed_df["sample_date"] = pd.to_datetime(confirmed_df.sample_date, dayfirst=True) confirmed_df["sample_date"] = confirmed_df.sample_date.dt.strftime("%Y-%m-%d") confirmed_df.sort_values("sample_date", inplace=True) confirmed_df.tail() confirmed_days = pd.date_range( start=confirmed_df.iloc[0].sample_date, end=extraction_datetime) confirmed_days_df = pd.DataFrame(data=confirmed_days, columns=["sample_date"]) 
confirmed_days_df["sample_date_string"] = \ confirmed_days_df.sample_date.dt.strftime("%Y-%m-%d") confirmed_days_df.tail() def sort_source_regions_for_display(source_regions: list) -> list: if report_backend_identifier in source_regions: source_regions = [report_backend_identifier] + \ list(sorted(set(source_regions).difference([report_backend_identifier]))) else: source_regions = list(sorted(source_regions)) return source_regions report_source_regions = report_backend_client.source_regions_for_date( date=extraction_datetime.date()) report_source_regions = sort_source_regions_for_display( source_regions=report_source_regions) report_source_regions def get_cases_dataframe(source_regions_for_date_function, columns_suffix=None): source_regions_at_date_df = confirmed_days_df.copy() source_regions_at_date_df["source_regions_at_date"] = \ source_regions_at_date_df.sample_date.apply( lambda x: source_regions_for_date_function(date=x)) source_regions_at_date_df.sort_values("sample_date", inplace=True) source_regions_at_date_df["_source_regions_group"] = source_regions_at_date_df. \ source_regions_at_date.apply(lambda x: ",".join(sort_source_regions_for_display(x))) source_regions_at_date_df.tail() #%% source_regions_for_summary_df_ = \ source_regions_at_date_df[["sample_date", "_source_regions_group"]].copy() source_regions_for_summary_df_.rename(columns={"_source_regions_group": "source_regions"}, inplace=True) source_regions_for_summary_df_.tail() #%% confirmed_output_columns = ["sample_date", "new_cases", "covid_cases"] confirmed_output_df = pd.DataFrame(columns=confirmed_output_columns) for source_regions_group, source_regions_group_series in \ source_regions_at_date_df.groupby("_source_regions_group"): source_regions_set = set(source_regions_group.split(",")) confirmed_source_regions_set_df = \ confirmed_df[confirmed_df.country_code.isin(source_regions_set)].copy() confirmed_source_regions_group_df = \ confirmed_source_regions_set_df.groupby("sample_date").new_cases.sum() \ .reset_index().sort_values("sample_date") confirmed_source_regions_group_df = \ confirmed_source_regions_group_df.merge( confirmed_days_df[["sample_date_string"]].rename( columns={"sample_date_string": "sample_date"}), how="right") confirmed_source_regions_group_df["new_cases"] = \ confirmed_source_regions_group_df["new_cases"].clip(lower=0) confirmed_source_regions_group_df["covid_cases"] = \ confirmed_source_regions_group_df.new_cases.rolling(7, min_periods=0).mean().round() confirmed_source_regions_group_df = \ confirmed_source_regions_group_df[confirmed_output_columns] confirmed_source_regions_group_df = confirmed_source_regions_group_df.replace(0, np.nan) confirmed_source_regions_group_df.fillna(method="ffill", inplace=True) confirmed_source_regions_group_df = \ confirmed_source_regions_group_df[ confirmed_source_regions_group_df.sample_date.isin( source_regions_group_series.sample_date_string)] confirmed_output_df = confirmed_output_df.append(confirmed_source_regions_group_df) result_df = confirmed_output_df.copy() result_df.tail() #%% result_df.rename(columns={"sample_date": "sample_date_string"}, inplace=True) result_df = confirmed_days_df[["sample_date_string"]].merge(result_df, how="left") result_df.sort_values("sample_date_string", inplace=True) result_df.fillna(method="ffill", inplace=True) result_df.tail() #%% result_df[["new_cases", "covid_cases"]].plot() if columns_suffix: result_df.rename( columns={ "new_cases": "new_cases_" + columns_suffix, "covid_cases": "covid_cases_" + columns_suffix}, inplace=True) 
return result_df, source_regions_for_summary_df_ confirmed_eu_df, source_regions_for_summary_df = get_cases_dataframe( report_backend_client.source_regions_for_date) confirmed_es_df, _ = get_cases_dataframe( lambda date: [spain_region_country_code], columns_suffix=spain_region_country_code.lower()) ``` ### Extract API TEKs ``` raw_zip_path_prefix = "Data/TEKs/Raw/" base_backend_identifiers = [report_backend_identifier] multi_backend_exposure_keys_df = \ exposure_notification_io.download_exposure_keys_from_backends( backend_identifiers=report_backend_identifiers, generation_days=backend_generation_days, fail_on_error_backend_identifiers=base_backend_identifiers, save_raw_zip_path_prefix=raw_zip_path_prefix) multi_backend_exposure_keys_df["region"] = multi_backend_exposure_keys_df["backend_identifier"] multi_backend_exposure_keys_df.rename( columns={ "generation_datetime": "sample_datetime", "generation_date_string": "sample_date_string", }, inplace=True) multi_backend_exposure_keys_df.head() early_teks_df = multi_backend_exposure_keys_df[ multi_backend_exposure_keys_df.rolling_period < 144].copy() early_teks_df["rolling_period_in_hours"] = early_teks_df.rolling_period / 6 early_teks_df[early_teks_df.sample_date_string != extraction_date] \ .rolling_period_in_hours.hist(bins=list(range(24))) early_teks_df[early_teks_df.sample_date_string == extraction_date] \ .rolling_period_in_hours.hist(bins=list(range(24))) multi_backend_exposure_keys_df = multi_backend_exposure_keys_df[[ "sample_date_string", "region", "key_data"]] multi_backend_exposure_keys_df.head() active_regions = \ multi_backend_exposure_keys_df.groupby("region").key_data.nunique().sort_values().index.unique().tolist() active_regions multi_backend_summary_df = multi_backend_exposure_keys_df.groupby( ["sample_date_string", "region"]).key_data.nunique().reset_index() \ .pivot(index="sample_date_string", columns="region") \ .sort_index(ascending=False) multi_backend_summary_df.rename( columns={"key_data": "shared_teks_by_generation_date"}, inplace=True) multi_backend_summary_df.rename_axis("sample_date", inplace=True) multi_backend_summary_df = multi_backend_summary_df.fillna(0).astype(int) multi_backend_summary_df = multi_backend_summary_df.head(backend_generation_days) multi_backend_summary_df.head() def compute_keys_cross_sharing(x): teks_x = x.key_data_x.item() common_teks = set(teks_x).intersection(x.key_data_y.item()) common_teks_fraction = len(common_teks) / len(teks_x) return pd.Series(dict( common_teks=common_teks, common_teks_fraction=common_teks_fraction, )) multi_backend_exposure_keys_by_region_df = \ multi_backend_exposure_keys_df.groupby("region").key_data.unique().reset_index() multi_backend_exposure_keys_by_region_df["_merge"] = True multi_backend_exposure_keys_by_region_combination_df = \ multi_backend_exposure_keys_by_region_df.merge( multi_backend_exposure_keys_by_region_df, on="_merge") multi_backend_exposure_keys_by_region_combination_df.drop( columns=["_merge"], inplace=True) if multi_backend_exposure_keys_by_region_combination_df.region_x.nunique() > 1: multi_backend_exposure_keys_by_region_combination_df = \ multi_backend_exposure_keys_by_region_combination_df[ multi_backend_exposure_keys_by_region_combination_df.region_x != multi_backend_exposure_keys_by_region_combination_df.region_y] multi_backend_exposure_keys_cross_sharing_df = \ multi_backend_exposure_keys_by_region_combination_df \ .groupby(["region_x", "region_y"]) \ .apply(compute_keys_cross_sharing) \ .reset_index() 
multi_backend_cross_sharing_summary_df = \ multi_backend_exposure_keys_cross_sharing_df.pivot_table( values=["common_teks_fraction"], columns="region_x", index="region_y", aggfunc=lambda x: x.item()) multi_backend_cross_sharing_summary_df multi_backend_without_active_region_exposure_keys_df = \ multi_backend_exposure_keys_df[multi_backend_exposure_keys_df.region != report_backend_identifier] multi_backend_without_active_region = \ multi_backend_without_active_region_exposure_keys_df.groupby("region").key_data.nunique().sort_values().index.unique().tolist() multi_backend_without_active_region exposure_keys_summary_df = multi_backend_exposure_keys_df[ multi_backend_exposure_keys_df.region == report_backend_identifier] exposure_keys_summary_df.drop(columns=["region"], inplace=True) exposure_keys_summary_df = \ exposure_keys_summary_df.groupby(["sample_date_string"]).key_data.nunique().to_frame() exposure_keys_summary_df = \ exposure_keys_summary_df.reset_index().set_index("sample_date_string") exposure_keys_summary_df.sort_index(ascending=False, inplace=True) exposure_keys_summary_df.rename(columns={"key_data": "shared_teks_by_generation_date"}, inplace=True) exposure_keys_summary_df.head() ``` ### Dump API TEKs ``` tek_list_df = multi_backend_exposure_keys_df[ ["sample_date_string", "region", "key_data"]].copy() tek_list_df["key_data"] = tek_list_df["key_data"].apply(str) tek_list_df.rename(columns={ "sample_date_string": "sample_date", "key_data": "tek_list"}, inplace=True) tek_list_df = tek_list_df.groupby( ["sample_date", "region"]).tek_list.unique().reset_index() tek_list_df["extraction_date"] = extraction_date tek_list_df["extraction_date_with_hour"] = extraction_date_with_hour tek_list_path_prefix = "Data/TEKs/" tek_list_current_path = tek_list_path_prefix + f"/Current/RadarCOVID-TEKs.json" tek_list_daily_path = tek_list_path_prefix + f"Daily/RadarCOVID-TEKs-{extraction_date}.json" tek_list_hourly_path = tek_list_path_prefix + f"Hourly/RadarCOVID-TEKs-{extraction_date_with_hour}.json" for path in [tek_list_current_path, tek_list_daily_path, tek_list_hourly_path]: os.makedirs(os.path.dirname(path), exist_ok=True) tek_list_base_df = tek_list_df[tek_list_df.region == report_backend_identifier] tek_list_base_df.drop(columns=["extraction_date", "extraction_date_with_hour"]).to_json( tek_list_current_path, lines=True, orient="records") tek_list_base_df.drop(columns=["extraction_date_with_hour"]).to_json( tek_list_daily_path, lines=True, orient="records") tek_list_base_df.to_json( tek_list_hourly_path, lines=True, orient="records") tek_list_base_df.head() ``` ### Load TEK Dumps ``` import glob def load_extracted_teks(mode, region=None, limit=None) -> pd.DataFrame: extracted_teks_df = pd.DataFrame(columns=["region"]) file_paths = list(reversed(sorted(glob.glob(tek_list_path_prefix + mode + "/RadarCOVID-TEKs-*.json")))) if limit: file_paths = file_paths[:limit] for file_path in file_paths: logging.info(f"Loading TEKs from '{file_path}'...") iteration_extracted_teks_df = pd.read_json(file_path, lines=True) extracted_teks_df = extracted_teks_df.append( iteration_extracted_teks_df, sort=False) extracted_teks_df["region"] = \ extracted_teks_df.region.fillna(spain_region_country_code).copy() if region: extracted_teks_df = \ extracted_teks_df[extracted_teks_df.region == region] return extracted_teks_df daily_extracted_teks_df = load_extracted_teks( mode="Daily", region=report_backend_identifier, limit=tek_dumps_load_limit) daily_extracted_teks_df.head() exposure_keys_summary_df_ = 
daily_extracted_teks_df \ .sort_values("extraction_date", ascending=False) \ .groupby("sample_date").tek_list.first() \ .to_frame() exposure_keys_summary_df_.index.name = "sample_date_string" exposure_keys_summary_df_["tek_list"] = \ exposure_keys_summary_df_.tek_list.apply(len) exposure_keys_summary_df_ = exposure_keys_summary_df_ \ .rename(columns={"tek_list": "shared_teks_by_generation_date"}) \ .sort_index(ascending=False) exposure_keys_summary_df = exposure_keys_summary_df_ exposure_keys_summary_df.head() ``` ### Daily New TEKs ``` tek_list_df = daily_extracted_teks_df.groupby("extraction_date").tek_list.apply( lambda x: set(sum(x, []))).reset_index() tek_list_df = tek_list_df.set_index("extraction_date").sort_index(ascending=True) tek_list_df.head() def compute_teks_by_generation_and_upload_date(date): day_new_teks_set_df = tek_list_df.copy().diff() try: day_new_teks_set = day_new_teks_set_df[ day_new_teks_set_df.index == date].tek_list.item() except ValueError: day_new_teks_set = None if pd.isna(day_new_teks_set): day_new_teks_set = set() day_new_teks_df = daily_extracted_teks_df[ daily_extracted_teks_df.extraction_date == date].copy() day_new_teks_df["shared_teks"] = \ day_new_teks_df.tek_list.apply(lambda x: set(x).intersection(day_new_teks_set)) day_new_teks_df["shared_teks"] = \ day_new_teks_df.shared_teks.apply(len) day_new_teks_df["upload_date"] = date day_new_teks_df.rename(columns={"sample_date": "generation_date"}, inplace=True) day_new_teks_df = day_new_teks_df[ ["upload_date", "generation_date", "shared_teks"]] day_new_teks_df["generation_to_upload_days"] = \ (pd.to_datetime(day_new_teks_df.upload_date) - pd.to_datetime(day_new_teks_df.generation_date)).dt.days day_new_teks_df = day_new_teks_df[day_new_teks_df.shared_teks > 0] return day_new_teks_df shared_teks_generation_to_upload_df = pd.DataFrame() for upload_date in daily_extracted_teks_df.extraction_date.unique(): shared_teks_generation_to_upload_df = \ shared_teks_generation_to_upload_df.append( compute_teks_by_generation_and_upload_date(date=upload_date)) shared_teks_generation_to_upload_df \ .sort_values(["upload_date", "generation_date"], ascending=False, inplace=True) shared_teks_generation_to_upload_df.tail() today_new_teks_df = \ shared_teks_generation_to_upload_df[ shared_teks_generation_to_upload_df.upload_date == extraction_date].copy() today_new_teks_df.tail() if not today_new_teks_df.empty: today_new_teks_df.set_index("generation_to_upload_days") \ .sort_index().shared_teks.plot.bar() generation_to_upload_period_pivot_df = \ shared_teks_generation_to_upload_df[ ["upload_date", "generation_to_upload_days", "shared_teks"]] \ .pivot(index="upload_date", columns="generation_to_upload_days") \ .sort_index(ascending=False).fillna(0).astype(int) \ .droplevel(level=0, axis=1) generation_to_upload_period_pivot_df.head() new_tek_df = tek_list_df.diff().tek_list.apply( lambda x: len(x) if not pd.isna(x) else None).to_frame().reset_index() new_tek_df.rename(columns={ "tek_list": "shared_teks_by_upload_date", "extraction_date": "sample_date_string",}, inplace=True) new_tek_df.tail() shared_teks_uploaded_on_generation_date_df = shared_teks_generation_to_upload_df[ shared_teks_generation_to_upload_df.generation_to_upload_days == 0] \ [["upload_date", "shared_teks"]].rename( columns={ "upload_date": "sample_date_string", "shared_teks": "shared_teks_uploaded_on_generation_date", }) shared_teks_uploaded_on_generation_date_df.head() estimated_shared_diagnoses_df = shared_teks_generation_to_upload_df \ 
.groupby(["upload_date"]).shared_teks.max().reset_index() \ .sort_values(["upload_date"], ascending=False) \ .rename(columns={ "upload_date": "sample_date_string", "shared_teks": "shared_diagnoses", }) invalid_shared_diagnoses_dates_mask = \ estimated_shared_diagnoses_df.sample_date_string.isin(invalid_shared_diagnoses_dates) estimated_shared_diagnoses_df[invalid_shared_diagnoses_dates_mask] = 0 estimated_shared_diagnoses_df.head() ``` ### Hourly New TEKs ``` hourly_extracted_teks_df = load_extracted_teks( mode="Hourly", region=report_backend_identifier, limit=25) hourly_extracted_teks_df.head() hourly_new_tek_count_df = hourly_extracted_teks_df \ .groupby("extraction_date_with_hour").tek_list. \ apply(lambda x: set(sum(x, []))).reset_index().copy() hourly_new_tek_count_df = hourly_new_tek_count_df.set_index("extraction_date_with_hour") \ .sort_index(ascending=True) hourly_new_tek_count_df["new_tek_list"] = hourly_new_tek_count_df.tek_list.diff() hourly_new_tek_count_df["new_tek_count"] = hourly_new_tek_count_df.new_tek_list.apply( lambda x: len(x) if not pd.isna(x) else 0) hourly_new_tek_count_df.rename(columns={ "new_tek_count": "shared_teks_by_upload_date"}, inplace=True) hourly_new_tek_count_df = hourly_new_tek_count_df.reset_index()[[ "extraction_date_with_hour", "shared_teks_by_upload_date"]] hourly_new_tek_count_df.head() hourly_summary_df = hourly_new_tek_count_df.copy() hourly_summary_df.set_index("extraction_date_with_hour", inplace=True) hourly_summary_df = hourly_summary_df.fillna(0).astype(int).reset_index() hourly_summary_df["datetime_utc"] = pd.to_datetime( hourly_summary_df.extraction_date_with_hour, format="%Y-%m-%d@%H") hourly_summary_df.set_index("datetime_utc", inplace=True) hourly_summary_df = hourly_summary_df.tail(-1) hourly_summary_df.head() ``` ### Official Statistics ``` import requests import pandas.io.json official_stats_response = requests.get("https://radarcovid.covid19.gob.es/kpi/statistics/basics") official_stats_response.raise_for_status() official_stats_df_ = pandas.io.json.json_normalize(official_stats_response.json()) official_stats_df = official_stats_df_.copy() official_stats_df["date"] = pd.to_datetime(official_stats_df["date"], dayfirst=True) official_stats_df.head() official_stats_column_map = { "date": "sample_date", "applicationsDownloads.totalAcummulated": "app_downloads_es_accumulated", "communicatedContagions.totalAcummulated": "shared_diagnoses_es_accumulated", } accumulated_suffix = "_accumulated" accumulated_values_columns = \ list(filter(lambda x: x.endswith(accumulated_suffix), official_stats_column_map.values())) interpolated_values_columns = \ list(map(lambda x: x[:-len(accumulated_suffix)], accumulated_values_columns)) official_stats_df = \ official_stats_df[official_stats_column_map.keys()] \ .rename(columns=official_stats_column_map) official_stats_df["extraction_date"] = extraction_date official_stats_df.head() official_stats_path = "Data/Statistics/Current/RadarCOVID-Statistics.json" previous_official_stats_df = pd.read_json(official_stats_path, orient="records", lines=True) previous_official_stats_df["sample_date"] = pd.to_datetime(previous_official_stats_df["sample_date"], dayfirst=True) official_stats_df = official_stats_df.append(previous_official_stats_df) official_stats_df.head() official_stats_df = official_stats_df[~(official_stats_df.shared_diagnoses_es_accumulated == 0)] official_stats_df.sort_values("extraction_date", ascending=False, inplace=True) official_stats_df.drop_duplicates(subset=["sample_date"], keep="first", 
inplace=True) official_stats_df.head() official_stats_stored_df = official_stats_df.copy() official_stats_stored_df["sample_date"] = official_stats_stored_df.sample_date.dt.strftime("%Y-%m-%d") official_stats_stored_df.to_json(official_stats_path, orient="records", lines=True) official_stats_df.drop(columns=["extraction_date"], inplace=True) official_stats_df = confirmed_days_df.merge(official_stats_df, how="left") official_stats_df.sort_values("sample_date", ascending=False, inplace=True) official_stats_df.head() official_stats_df[accumulated_values_columns] = \ official_stats_df[accumulated_values_columns] \ .astype(float).interpolate(limit_area="inside") official_stats_df[interpolated_values_columns] = \ official_stats_df[accumulated_values_columns].diff(periods=-1) official_stats_df.drop(columns="sample_date", inplace=True) official_stats_df.head() ``` ### Data Merge ``` result_summary_df = exposure_keys_summary_df.merge( new_tek_df, on=["sample_date_string"], how="outer") result_summary_df.head() result_summary_df = result_summary_df.merge( shared_teks_uploaded_on_generation_date_df, on=["sample_date_string"], how="outer") result_summary_df.head() result_summary_df = result_summary_df.merge( estimated_shared_diagnoses_df, on=["sample_date_string"], how="outer") result_summary_df.head() result_summary_df = result_summary_df.merge( official_stats_df, on=["sample_date_string"], how="outer") result_summary_df.head() result_summary_df = confirmed_eu_df.tail(daily_summary_days).merge( result_summary_df, on=["sample_date_string"], how="left") result_summary_df.head() result_summary_df = confirmed_es_df.tail(daily_summary_days).merge( result_summary_df, on=["sample_date_string"], how="left") result_summary_df.head() result_summary_df["sample_date"] = pd.to_datetime(result_summary_df.sample_date_string) result_summary_df = result_summary_df.merge(source_regions_for_summary_df, how="left") result_summary_df.set_index(["sample_date", "source_regions"], inplace=True) result_summary_df.drop(columns=["sample_date_string"], inplace=True) result_summary_df.sort_index(ascending=False, inplace=True) result_summary_df.head() with pd.option_context("mode.use_inf_as_na", True): result_summary_df = result_summary_df.fillna(0).astype(int) result_summary_df["teks_per_shared_diagnosis"] = \ (result_summary_df.shared_teks_by_upload_date / result_summary_df.shared_diagnoses).fillna(0) result_summary_df["shared_diagnoses_per_covid_case"] = \ (result_summary_df.shared_diagnoses / result_summary_df.covid_cases).fillna(0) result_summary_df["shared_diagnoses_per_covid_case_es"] = \ (result_summary_df.shared_diagnoses_es / result_summary_df.covid_cases_es).fillna(0) result_summary_df.head(daily_plot_days) def compute_aggregated_results_summary(days) -> pd.DataFrame: aggregated_result_summary_df = result_summary_df.copy() aggregated_result_summary_df["covid_cases_for_ratio"] = \ aggregated_result_summary_df.covid_cases.mask( aggregated_result_summary_df.shared_diagnoses == 0, 0) aggregated_result_summary_df["covid_cases_for_ratio_es"] = \ aggregated_result_summary_df.covid_cases_es.mask( aggregated_result_summary_df.shared_diagnoses_es == 0, 0) aggregated_result_summary_df = aggregated_result_summary_df \ .sort_index(ascending=True).fillna(0).rolling(days).agg({ "covid_cases": "sum", "covid_cases_es": "sum", "covid_cases_for_ratio": "sum", "covid_cases_for_ratio_es": "sum", "shared_teks_by_generation_date": "sum", "shared_teks_by_upload_date": "sum", "shared_diagnoses": "sum", "shared_diagnoses_es": "sum", 
}).sort_index(ascending=False) with pd.option_context("mode.use_inf_as_na", True): aggregated_result_summary_df = aggregated_result_summary_df.fillna(0).astype(int) aggregated_result_summary_df["teks_per_shared_diagnosis"] = \ (aggregated_result_summary_df.shared_teks_by_upload_date / aggregated_result_summary_df.covid_cases_for_ratio).fillna(0) aggregated_result_summary_df["shared_diagnoses_per_covid_case"] = \ (aggregated_result_summary_df.shared_diagnoses / aggregated_result_summary_df.covid_cases_for_ratio).fillna(0) aggregated_result_summary_df["shared_diagnoses_per_covid_case_es"] = \ (aggregated_result_summary_df.shared_diagnoses_es / aggregated_result_summary_df.covid_cases_for_ratio_es).fillna(0) return aggregated_result_summary_df aggregated_result_with_7_days_window_summary_df = compute_aggregated_results_summary(days=7) aggregated_result_with_7_days_window_summary_df.head() last_7_days_summary = aggregated_result_with_7_days_window_summary_df.to_dict(orient="records")[1] last_7_days_summary aggregated_result_with_14_days_window_summary_df = compute_aggregated_results_summary(days=13) last_14_days_summary = aggregated_result_with_14_days_window_summary_df.to_dict(orient="records")[1] last_14_days_summary ``` ## Report Results ``` display_column_name_mapping = { "sample_date": "Sample\u00A0Date\u00A0(UTC)", "source_regions": "Source Countries", "datetime_utc": "Timestamp (UTC)", "upload_date": "Upload Date (UTC)", "generation_to_upload_days": "Generation to Upload Period in Days", "region": "Backend", "region_x": "Backend\u00A0(A)", "region_y": "Backend\u00A0(B)", "common_teks": "Common TEKs Shared Between Backends", "common_teks_fraction": "Fraction of TEKs in Backend (A) Available in Backend (B)", "covid_cases": "COVID-19 Cases (Source Countries)", "shared_teks_by_generation_date": "Shared TEKs by Generation Date (Source Countries)", "shared_teks_by_upload_date": "Shared TEKs by Upload Date (Source Countries)", "shared_teks_uploaded_on_generation_date": "Shared TEKs Uploaded on Generation Date (Source Countries)", "shared_diagnoses": "Shared Diagnoses (Source Countries – Estimation)", "teks_per_shared_diagnosis": "TEKs Uploaded per Shared Diagnosis (Source Countries)", "shared_diagnoses_per_covid_case": "Usage Ratio (Source Countries)", "covid_cases_es": "COVID-19 Cases (Spain)", "app_downloads_es": "App Downloads (Spain – Official)", "shared_diagnoses_es": "Shared Diagnoses (Spain – Official)", "shared_diagnoses_per_covid_case_es": "Usage Ratio (Spain)", } summary_columns = [ "covid_cases", "shared_teks_by_generation_date", "shared_teks_by_upload_date", "shared_teks_uploaded_on_generation_date", "shared_diagnoses", "teks_per_shared_diagnosis", "shared_diagnoses_per_covid_case", "covid_cases_es", "app_downloads_es", "shared_diagnoses_es", "shared_diagnoses_per_covid_case_es", ] summary_percentage_columns= [ "shared_diagnoses_per_covid_case_es", "shared_diagnoses_per_covid_case", ] ``` ### Daily Summary Table ``` result_summary_df_ = result_summary_df.copy() result_summary_df = result_summary_df[summary_columns] result_summary_with_display_names_df = result_summary_df \ .rename_axis(index=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) result_summary_with_display_names_df ``` ### Daily Summary Plots ``` result_plot_summary_df = result_summary_df.head(daily_plot_days)[summary_columns] \ .droplevel(level=["source_regions"]) \ .rename_axis(index=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) summary_ax_list = 
result_plot_summary_df.sort_index(ascending=True).plot.bar( title=f"Daily Summary", rot=45, subplots=True, figsize=(15, 30), legend=False) ax_ = summary_ax_list[0] ax_.get_figure().tight_layout() ax_.get_figure().subplots_adjust(top=0.95) _ = ax_.set_xticklabels(sorted(result_plot_summary_df.index.strftime("%Y-%m-%d").tolist())) for percentage_column in summary_percentage_columns: percentage_column_index = summary_columns.index(percentage_column) summary_ax_list[percentage_column_index].yaxis \ .set_major_formatter(matplotlib.ticker.PercentFormatter(1.0)) ``` ### Daily Generation to Upload Period Table ``` display_generation_to_upload_period_pivot_df = \ generation_to_upload_period_pivot_df \ .head(backend_generation_days) display_generation_to_upload_period_pivot_df \ .head(backend_generation_days) \ .rename_axis(columns=display_column_name_mapping) \ .rename_axis(index=display_column_name_mapping) fig, generation_to_upload_period_pivot_table_ax = plt.subplots( figsize=(12, 1 + 0.6 * len(display_generation_to_upload_period_pivot_df))) generation_to_upload_period_pivot_table_ax.set_title( "Shared TEKs Generation to Upload Period Table") sns.heatmap( data=display_generation_to_upload_period_pivot_df .rename_axis(columns=display_column_name_mapping) .rename_axis(index=display_column_name_mapping), fmt=".0f", annot=True, ax=generation_to_upload_period_pivot_table_ax) generation_to_upload_period_pivot_table_ax.get_figure().tight_layout() ``` ### Hourly Summary Plots ``` hourly_summary_ax_list = hourly_summary_df \ .rename_axis(index=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) \ .plot.bar( title=f"Last 24h Summary", rot=45, subplots=True, legend=False) ax_ = hourly_summary_ax_list[-1] ax_.get_figure().tight_layout() ax_.get_figure().subplots_adjust(top=0.9) _ = ax_.set_xticklabels(sorted(hourly_summary_df.index.strftime("%Y-%m-%d@%H").tolist())) ``` ### Publish Results ``` github_repository = os.environ.get("GITHUB_REPOSITORY") if github_repository is None: github_repository = "pvieito/Radar-STATS" github_project_base_url = "https://github.com/" + github_repository display_formatters = { display_column_name_mapping["teks_per_shared_diagnosis"]: lambda x: f"{x:.2f}" if x != 0 else "", display_column_name_mapping["shared_diagnoses_per_covid_case"]: lambda x: f"{x:.2%}" if x != 0 else "", display_column_name_mapping["shared_diagnoses_per_covid_case_es"]: lambda x: f"{x:.2%}" if x != 0 else "", } general_columns = \ list(filter(lambda x: x not in display_formatters, display_column_name_mapping.values())) general_formatter = lambda x: f"{x}" if x != 0 else "" display_formatters.update(dict(map(lambda x: (x, general_formatter), general_columns))) daily_summary_table_html = result_summary_with_display_names_df \ .head(daily_plot_days) \ .rename_axis(index=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) \ .to_html(formatters=display_formatters) multi_backend_summary_table_html = multi_backend_summary_df \ .head(daily_plot_days) \ .rename_axis(columns=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) \ .rename_axis(index=display_column_name_mapping) \ .to_html(formatters=display_formatters) def format_multi_backend_cross_sharing_fraction(x): if pd.isna(x): return "-" elif round(x * 100, 1) == 0: return "" else: return f"{x:.1%}" multi_backend_cross_sharing_summary_table_html = multi_backend_cross_sharing_summary_df \ .rename_axis(columns=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) \ 
.rename_axis(index=display_column_name_mapping) \ .to_html( classes="table-center", formatters=display_formatters, float_format=format_multi_backend_cross_sharing_fraction) multi_backend_cross_sharing_summary_table_html = \ multi_backend_cross_sharing_summary_table_html \ .replace("<tr>","<tr style=\"text-align: center;\">") extraction_date_result_summary_df = \ result_summary_df[result_summary_df.index.get_level_values("sample_date") == extraction_date] extraction_date_result_hourly_summary_df = \ hourly_summary_df[hourly_summary_df.extraction_date_with_hour == extraction_date_with_hour] covid_cases = \ extraction_date_result_summary_df.covid_cases.item() shared_teks_by_generation_date = \ extraction_date_result_summary_df.shared_teks_by_generation_date.item() shared_teks_by_upload_date = \ extraction_date_result_summary_df.shared_teks_by_upload_date.item() shared_diagnoses = \ extraction_date_result_summary_df.shared_diagnoses.item() teks_per_shared_diagnosis = \ extraction_date_result_summary_df.teks_per_shared_diagnosis.item() shared_diagnoses_per_covid_case = \ extraction_date_result_summary_df.shared_diagnoses_per_covid_case.item() shared_teks_by_upload_date_last_hour = \ extraction_date_result_hourly_summary_df.shared_teks_by_upload_date.sum().astype(int) display_source_regions = ", ".join(report_source_regions) if len(report_source_regions) == 1: display_brief_source_regions = report_source_regions[0] else: display_brief_source_regions = f"{len(report_source_regions)} 🇪🇺" def get_temporary_image_path() -> str: return os.path.join(tempfile.gettempdir(), str(uuid.uuid4()) + ".png") def save_temporary_plot_image(ax): if isinstance(ax, np.ndarray): ax = ax[0] media_path = get_temporary_image_path() ax.get_figure().savefig(media_path) return media_path def save_temporary_dataframe_image(df): import dataframe_image as dfi df = df.copy() df_styler = df.style.format(display_formatters) media_path = get_temporary_image_path() dfi.export(df_styler, media_path) return media_path summary_plots_image_path = save_temporary_plot_image( ax=summary_ax_list) summary_table_image_path = save_temporary_dataframe_image( df=result_summary_with_display_names_df) hourly_summary_plots_image_path = save_temporary_plot_image( ax=hourly_summary_ax_list) multi_backend_summary_table_image_path = save_temporary_dataframe_image( df=multi_backend_summary_df) generation_to_upload_period_pivot_table_image_path = save_temporary_plot_image( ax=generation_to_upload_period_pivot_table_ax) ``` ### Save Results ``` report_resources_path_prefix = "Data/Resources/Current/RadarCOVID-Report-" result_summary_df.to_csv( report_resources_path_prefix + "Summary-Table.csv") result_summary_df.to_html( report_resources_path_prefix + "Summary-Table.html") hourly_summary_df.to_csv( report_resources_path_prefix + "Hourly-Summary-Table.csv") multi_backend_summary_df.to_csv( report_resources_path_prefix + "Multi-Backend-Summary-Table.csv") multi_backend_cross_sharing_summary_df.to_csv( report_resources_path_prefix + "Multi-Backend-Cross-Sharing-Summary-Table.csv") generation_to_upload_period_pivot_df.to_csv( report_resources_path_prefix + "Generation-Upload-Period-Table.csv") _ = shutil.copyfile( summary_plots_image_path, report_resources_path_prefix + "Summary-Plots.png") _ = shutil.copyfile( summary_table_image_path, report_resources_path_prefix + "Summary-Table.png") _ = shutil.copyfile( hourly_summary_plots_image_path, report_resources_path_prefix + "Hourly-Summary-Plots.png") _ = shutil.copyfile( 
multi_backend_summary_table_image_path, report_resources_path_prefix + "Multi-Backend-Summary-Table.png") _ = shutil.copyfile( generation_to_upload_period_pivot_table_image_path, report_resources_path_prefix + "Generation-Upload-Period-Table.png") ``` ### Publish Results as JSON ``` def generate_summary_api_results(df: pd.DataFrame) -> list: api_df = df.reset_index().copy() api_df["sample_date_string"] = \ api_df["sample_date"].dt.strftime("%Y-%m-%d") api_df["source_regions"] = \ api_df["source_regions"].apply(lambda x: x.split(",")) return api_df.to_dict(orient="records") summary_api_results = \ generate_summary_api_results(df=result_summary_df) today_summary_api_results = \ generate_summary_api_results(df=extraction_date_result_summary_df)[0] summary_results = dict( backend_identifier=report_backend_identifier, source_regions=report_source_regions, extraction_datetime=extraction_datetime, extraction_date=extraction_date, extraction_date_with_hour=extraction_date_with_hour, last_hour=dict( shared_teks_by_upload_date=shared_teks_by_upload_date_last_hour, shared_diagnoses=0, ), today=today_summary_api_results, last_7_days=last_7_days_summary, last_14_days=last_14_days_summary, daily_results=summary_api_results) summary_results = \ json.loads(pd.Series([summary_results]).to_json(orient="records"))[0] with open(report_resources_path_prefix + "Summary-Results.json", "w") as f: json.dump(summary_results, f, indent=4) ``` ### Publish on README ``` with open("Data/Templates/README.md", "r") as f: readme_contents = f.read() readme_contents = readme_contents.format( extraction_date_with_hour=extraction_date_with_hour, github_project_base_url=github_project_base_url, daily_summary_table_html=daily_summary_table_html, multi_backend_summary_table_html=multi_backend_summary_table_html, multi_backend_cross_sharing_summary_table_html=multi_backend_cross_sharing_summary_table_html, display_source_regions=display_source_regions) with open("README.md", "w") as f: f.write(readme_contents) ``` ### Publish on Twitter ``` enable_share_to_twitter = os.environ.get("RADARCOVID_REPORT__ENABLE_PUBLISH_ON_TWITTER") github_event_name = os.environ.get("GITHUB_EVENT_NAME") if enable_share_to_twitter and github_event_name == "schedule" and \ (shared_teks_by_upload_date_last_hour or not are_today_results_partial): import tweepy twitter_api_auth_keys = os.environ["RADARCOVID_REPORT__TWITTER_API_AUTH_KEYS"] twitter_api_auth_keys = twitter_api_auth_keys.split(":") auth = tweepy.OAuthHandler(twitter_api_auth_keys[0], twitter_api_auth_keys[1]) auth.set_access_token(twitter_api_auth_keys[2], twitter_api_auth_keys[3]) api = tweepy.API(auth) summary_plots_media = api.media_upload(summary_plots_image_path) summary_table_media = api.media_upload(summary_table_image_path) generation_to_upload_period_pivot_table_image_media = api.media_upload(generation_to_upload_period_pivot_table_image_path) media_ids = [ summary_plots_media.media_id, summary_table_media.media_id, generation_to_upload_period_pivot_table_image_media.media_id, ] if are_today_results_partial: today_addendum = " (Partial)" else: today_addendum = "" def format_shared_diagnoses_per_covid_case(value) -> str: if value == 0: return "–" return f"≤{value:.2%}" display_shared_diagnoses_per_covid_case = \ format_shared_diagnoses_per_covid_case(value=shared_diagnoses_per_covid_case) display_last_14_days_shared_diagnoses_per_covid_case = \ format_shared_diagnoses_per_covid_case(value=last_14_days_summary["shared_diagnoses_per_covid_case"]) 
display_last_14_days_shared_diagnoses_per_covid_case_es = \ format_shared_diagnoses_per_covid_case(value=last_14_days_summary["shared_diagnoses_per_covid_case_es"]) status = textwrap.dedent(f""" #RadarCOVID – {extraction_date_with_hour} Today{today_addendum}: - Uploaded TEKs: {shared_teks_by_upload_date:.0f} ({shared_teks_by_upload_date_last_hour:+d} last hour) - Shared Diagnoses: ≤{shared_diagnoses:.0f} - Usage Ratio: {display_shared_diagnoses_per_covid_case} Last 14 Days: - Usage Ratio (Estimation): {display_last_14_days_shared_diagnoses_per_covid_case} - Usage Ratio (Official): {display_last_14_days_shared_diagnoses_per_covid_case_es} Info: {github_project_base_url}#documentation """) status = status.encode(encoding="utf-8") api.update_status(status=status, media_ids=media_ids) ```
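As a consumer-side sketch, not part of the report pipeline itself, the JSON artifact written above can be read back from another script or notebook to monitor the headline numbers; the field names follow the `summary_results` dictionary built earlier.

```
import json

with open("Data/Resources/Current/RadarCOVID-Report-Summary-Results.json") as f:
    summary = json.load(f)

print("Extraction date:", summary["extraction_date"])
print("Shared diagnoses (last 7 days):", summary["last_7_days"]["shared_diagnoses"])
print("Usage ratio (last 7 days):",
      "{:.2%}".format(summary["last_7_days"]["shared_diagnoses_per_covid_case"]))

# daily_results holds one record per day and can be turned into a DataFrame if needed
print("Days available:", len(summary["daily_results"]))
```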
github_jupyter
# Classifying cancer from 32 parameters Data is taken from https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+%28Diagnostic%29 We simply read all the data, drop the patient ID and place the label into an array of its own. ``` import csv import numpy with open('data_Cancer.csv') as input_file: text_data = [row for row in csv.reader(input_file, delimiter=',')] for line in text_data: _ = line.pop(0) #We remove the ID - no need for it known_labels = ','.join([line.pop(0) for line in text_data]) raw_data = numpy.array(text_data).astype(float) data = raw_data / numpy.max(raw_data, axis = 0) ``` Now we can write a generic clustering mechanism, similar to the small previous example, but with the distance computed over all feature columns rather than just the first two. ``` def all_dist(observation, data): return numpy.sqrt(numpy.sum((data - observation)**2, axis = 1)) def cluster(data, k): samples, _= data.shape centroids = numpy.array([data[numpy.random.randint(samples), :,] for _ in range(k)]) done = False while not done: distances = numpy.empty((k,samples)) for d in range(k): distances[d, :] = all_dist(centroids[d], data) winners = numpy.argmin(distances, axis = 0) clusters = [data[winners == i, :] for i in range(k)] prev_centroids = centroids centroids = numpy.array([numpy.average(c, axis = 0) for c in clusters]) if numpy.sum(prev_centroids-centroids) == 0: done=True return winners ``` Now we can find the clusters; since we have only two categories it is rather fast. We cannot know whether category 0 is malignant or benign, but have to assume that the smaller category is malignant. We thus change the labels to match that assumption. Then we can easily compare the classifications of each patient and check how well we did. ``` clusters = cluster(data, 2) a, b = numpy.bincount(clusters) labels = known_labels+'' if a<b: labels = labels.replace('M','0') labels = labels.replace('B','1') else: labels = labels.replace('M','1') labels = labels.replace('B','0') compare = (numpy.equal(clusters, numpy.array(labels.split(',')).astype(int))) print(numpy.bincount(compare),'(Wrong, Right)') ``` Run it a few times and realize that the success rate varies wildly between runs. Several approaches can be tried to remedy this. Try simply removing one or more dimensions to see if they are merely in the way (really: do a PCA, but quick-and-dirty tests are fine as well). Try changing the distance metric for individual dimensions, so that rather than simply including a parameter or not, as in the first approach, we can tune its importance; a sketch of such a weighted distance follows below.
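One way to realise that last idea, sketched here as a drop-in replacement for `all_dist`; the weight vector is purely illustrative.

```
import numpy

def weighted_dist(observation, data, column_weights):
    # Scale each column by its weight before taking the Euclidean distance
    diff = (data - observation) * column_weights
    return numpy.sqrt(numpy.sum(diff**2, axis=1))

# Example: keep every column but down-weight the last ten features
column_weights = numpy.ones(data.shape[1])
column_weights[-10:] = 0.25
distances = weighted_dist(data[0], data, column_weights)
```

The next block takes a different route: it starts with many more centroids than needed and repeatedly fuses the two closest ones until only the target number remains.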
``` def cluster(data, k, centroids = []): samples, _= data.shape if centroids == []: centroids = numpy.array([data[numpy.random.randint(samples), :,] for _ in range(k)]) done = False while not done: distances = numpy.empty((k,samples)) for d in range(k): distances[d, :] = all_dist(centroids[d], data) winners = numpy.argmin(distances, axis = 0) clusters = [data[winners == i, :] for i in range(k)] prev_centroids = centroids clusters = [c for c in clusters if len(c)>0] k = len(clusters) centroids = numpy.array([numpy.average(c, axis = 0) for c in clusters]) if len(prev_centroids) == len(centroids): if numpy.sum(prev_centroids-centroids) == 0: done=True return winners, centroids target_k = 2 n_centroids = 25 centroids = [] while n_centroids > target_k: clusters, centroids = cluster(data, n_centroids, centroids) if ( n_centroids > target_k ) and ( len(centroids) == n_centroids ): centroid_dist = numpy.sum(numpy.sqrt((centroids[:, numpy.newaxis, :]-centroids)**2), axis =2) centroid_dist[centroid_dist==0] = 1000.0 centroids = list(centroids) minpos = numpy.argmin(centroid_dist) point0, point1 = centroids.pop(minpos//n_centroids), centroids.pop((minpos%n_centroids)-1) #-1 because we pop centroids.append((point0 + point1)/2) n_centroids -= 1 else: n_centroids = len(centroids) clusters, centroids = cluster(data, n_centroids, centroids) #We have the number of required centroids now a, b = numpy.bincount(clusters) labels = known_labels+'' if a<b: labels = labels.replace('M','0') labels = labels.replace('B','1') else: labels = labels.replace('M','1') labels = labels.replace('B','0') compare = (numpy.equal(clusters, numpy.array(labels.split(',')).astype(numpy.int))) print(numpy.bincount(compare),'(Wrong, Right)') ``` *** Note to self - try with many more clusters, and after convergence, fuse the two clusters that are closest to one and repeat training. Repeat until the desired number of clusters are found. Fusing: simple mean, weighted mean or most discriminating (one furthest away from other centroids) ***
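As a small illustration of the "weighted mean" fusion option mentioned in the note above (an assumption on my part, not what the code currently does), the fused centroid can be weighted by cluster size so that a large cluster is not dragged towards a tiny neighbour:

```
import numpy

def fuse_weighted(point0, point1, n0, n1):
    # Weighted-mean fusion of two centroids, weighted by their cluster sizes.
    return (n0 * point0 + n1 * point1) / (n0 + n1)

# Tiny made-up example: the result stays close to the larger cluster's centroid
c0, c1 = numpy.array([0.2, 0.8]), numpy.array([0.6, 0.4])
print(fuse_weighted(c0, c1, n0=300, n1=30))
```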
github_jupyter
## Loading an HCA matrix into scanpy This vignette illustrates requesting an expression matrix from the HCA matrix service and loading it into scanpy. First, install and import some dependencies: ``` import sys !{sys.executable} -m pip install python-igraph loompy louvain pandas requests scanpy import json, os, requests, scanpy.api as sc, shutil, time, zipfile, warnings ``` Now, we're going to make some requests to describe what fields and values we can filter on when we're selecting our matrix. ``` MATRIX_URL = "https://matrix.data.humancellatlas.org/v1" resp = requests.get(MATRIX_URL + "/filters") print(resp.text) ``` That's the list of metadata fields we can filter on when requesting the matrix. We can describe any of them with further API calls. When we request categorical data, we see the number of cells associated with each category. For numeric, we see the range of value. ``` resp = requests.get(MATRIX_URL + "/filters/project.project_core.project_short_name") print(resp.text) resp = requests.get(MATRIX_URL + "/filters/genes_detected") print(resp.text) resp = requests.get(MATRIX_URL + "/filters/analysis_protocol.protocol_core.protocol_id") print(resp.text) ``` If we want to request a matrix based on these metadata values, we can add them to the `filter` in the body of a POST request to the matrix service: ``` resp = requests.post( MATRIX_URL + "/matrix", json={"filter": {"op": "and", "value": [ {"op": "=", "value": "Single cell transcriptome analysis of human pancreas", "field": "project.project_core.project_short_name"}, {"op": ">=", "value": 300, "field": "genes_detected"} ] }}) print(resp.text) ``` That call responds right away and tells us that the matrix is being prepared. We can use the `request_id` to wait until the matrix is done. ``` while True: status_resp = requests.get( MATRIX_URL + "/matrix/" + resp.json()["request_id"]) if status_resp.json()["status"] == "Complete": break print(status_resp.json()["status"], "Waiting...") time.sleep(30) print(status_resp.text) ``` Now, that the matrix is ready, we can download it. The file we download is a zip archive that contains a readme and a loom-formatted matrix. Loom is the default matrix format, but others can be specified in the matrix request. ``` matrix_response = requests.get(status_resp.json()["matrix_url"], stream=True) matrix_zip_filename = os.path.basename(status_resp.json()["matrix_url"]) with open(matrix_zip_filename, 'wb') as matrix_zip_file: shutil.copyfileobj(matrix_response.raw, matrix_zip_file) ``` ## HCA Matrix Service Loom Output The loom-formatted output from the matrix service is a zip archive that contains two files: | Filename | Description | |------------------------------------|-------------------------------| | `loom_readme.md` | This readme | | `<file_name>.loom` | Loom file with requested data | The Loom format is documented more fully, along with code samples, [here](https://linnarssonlab.org/loompy/index.html). Per Loom [conventions](https://linnarssonlab.org/loompy/conventions/index.html), columns in the loom-formatted expression matrix represent cells, and rows represent genes. The column and row attributes follow Loom conventions where applicable as well: `CellID` uniquely identifies a cell, `Gene` is a gene name, and `Accession` is an ensembl gene id. Descriptions of the remaining metadata fields are available at the [HCA Data Browser](https://prod.data.humancellatlas.org/metadata). And finally, we can use the `read_loom` method from scanpy, to load the matrix and peforms some analyses. 
Note that we specify `var_names="Accession"` so we get unique gene names. ``` matrix_loom_filename = matrix_zip_filename.rstrip(".zip") adata = sc.read_loom(matrix_loom_filename, var_names="Accession") adata.shape ``` We can perform some standard scanpy tasks, like nearest-neighbor calculation and clustering. ``` ?sc.pp.neighbors ?sc.pp.pca sc.pp.normalize_per_cell(adata) sc.pp.neighbors(adata) sc.tl.umap(adata) sc.pl.umap(adata, color="project.project_core.project_short_name", legend_loc="lower center", legend_fontsize=6) sc.tl.louvain(adata, resolution=0.2) sc.pl.umap(adata, color=["louvain"]) with warnings.catch_warnings(): warnings.simplefilter('ignore', RuntimeWarning) # catch a repetitive louvain warning sc.tl.rank_genes_groups(adata, 'louvain') sc.pl.rank_genes_groups(adata, n_genes=16, fontsize=12, gene_symbols="Gene") ```
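If the clustering is worth keeping, the results can be persisted without re-running the analysis. A minimal sketch (output file names are arbitrary choices): `adata.obs` is a pandas DataFrame, so the Louvain labels export directly to CSV, and the whole annotated object can be written back to disk.

```
# Persist the results (sketch; file names are arbitrary)
adata.obs[["louvain"]].to_csv("louvain_clusters.csv")
adata.write("hca_pancreas_annotated.h5ad")
```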
github_jupyter
Tutorial on computational modeling and statistical model fitting part of the *IBL Computational Neuroscience Course* organized by the [International Brain Laboratory](https://www.internationalbrainlab.com/) (April 2020). **Lecturer:** [Luigi Acerbi](http://luigiacerbi.com/). **Instructions:** - To run the tutorial, you will need a standard scientific Python 3.x installation with Jupyter notebook (such as [Anaconda](https://www.anaconda.com/distribution/)). - You will also need the `CMA-ES` optimization algorithm (see [here](https://github.com/CMA-ES/pycma)). You can install CMA-ES from the command line with `pip install cma`. - For any question, please email the course instructor at [email protected]. **Initial setup and loading the data:** ``` %matplotlib inline import numpy as np import matplotlib.pyplot as plt import pandas as pd import scipy as sp from scipy.stats import norm import cma ``` During this tutorial, we are going to use data from the [International Brain Laboratory](https://www.internationalbrainlab.com/) publicly released behavioral mouse dataset, from exemplar mouse `KS014`. See [this preprint](https://www.biorxiv.org/content/10.1101/2020.01.17.909838v2) for more information about the task and datasets. These data can also be inspected via the IBL DataJoint public interface [here](https://data.internationalbrainlab.org/mouse/18a54f60-534b-4ed5-8bda-b434079b8ab8). For convenience, the data of all behavioral sessions from examplar mouse `KS014` have been already downloaded in the `data` folder and slightly preprocessed into two `.csv` files, one for the training sessions (`KS014_train.csv`) and one with the *biased* sessions (`KS014_biased.csv`). We begin our tutorial by examining the training sessions. ``` df = pd.read_csv('./data/KS014_train.csv') # Load .csv file into a pandas DataFrame df['signed_contrast'] = df['contrast']*df['position'] # We define a new column for "signed contrasts" df.drop(columns='stim_probability_left', inplace=True) # Stimulus probability has no meaning for training sessions print('Total # of trials: ' + str(len(df['trial_num']))) print('Sessions: ' + str(np.unique(df['session_num']))) df.head() ``` **Inspecting the data:** The first thing to do with any dataset is to get familiar with it by running simple visualizations. Just plot stuff! For example, as a starter we plot data from individual sessions using a *scatterplot* format (perhaps not the best). What can we see from here? 
``` def scatterplot_psychometric_data(df,session_num=None,ax=None): """Plot psychometric data (optionally, of a chosen training session) as a scatter plot.""" if session_num == None: trial_mask = np.ones(len(df['session_num']), dtype=bool) # Select all trials else: trial_mask = df['session_num'] == session_num # Indexes of trials of the chosen session Ntrials = np.sum(trial_mask) # Number of chosen trials # Count "left" and "right" responses for each signed contrast level left_resp = df[(df['response_choice'] == -1) & trial_mask].groupby(['signed_contrast']).count()['trial_num'] right_resp = df[(df['response_choice'] == 1) & trial_mask].groupby(['signed_contrast']).count()['trial_num'] if ax == None: ax=fig.add_axes([0,0,1,1]) ax.scatter(left_resp.index,np.zeros(len(left_resp.index)), s=left_resp*10); ax.scatter(right_resp.index,np.ones(len(right_resp.index)), s=right_resp*10); ax.set_xlabel('Signed contrast (%)') ax.set_ylabel('Rightward response') if session_num == None: ax.set_title('Psychometric data (# trials = ' + str(Ntrials) + ')') else: ax.set_title('Psychometric data (session ' + str(session_num) + ', # trials = ' + str(Ntrials) + ')') return ax # Plot 2nd session fig = plt.figure(figsize=(9,4)) scatterplot_psychometric_data(df,2) plt.show() # Plot 15th session (last training session) fig = plt.figure(figsize=(9,4)) scatterplot_psychometric_data(df,15) plt.show() ``` We plot the same data again, this time with a different type of plot which may be more informative. ``` def plot_psychometric_data(df,session_num=None,ax=None): """Plot psychometric data (optionally, of a chosen training session) as a scatter plot.""" if session_num == None: trial_mask = np.ones(len(df['session_num']), dtype=bool) # Select all trials else: trial_mask = df['session_num'] == session_num # Indexes of trials of the chosen session Ntrials = np.sum(trial_mask) # Number of chosen trials # Count "left" and "right" responses for each signed contrast level left_resp = df[(df['response_choice'] == -1) & trial_mask].groupby(['signed_contrast']).count()['trial_num'] right_resp = df[(df['response_choice'] == 1) & trial_mask].groupby(['signed_contrast']).count()['trial_num'] frac_resp = right_resp / (left_resp + right_resp) err_bar = np.sqrt(frac_resp*(1-frac_resp)/(left_resp + right_resp)) # Why this formula for error bars? if ax == None: ax=fig.add_axes([0,0,1,1]) ax.errorbar(x=left_resp.index,y=frac_resp,yerr=err_bar,label='data'); ax.set_xlabel('Signed contrast (%)') ax.set_ylabel('Rightward response') if session_num == None: ax.set_title('Psychometric data (# trials = ' + str(Ntrials) + ')') else: ax.set_title('Psychometric data (session ' + str(session_num) + ', # trials = ' + str(Ntrials) + ')') plt.xlim((-105,105)) plt.ylim((0,1)) return ax fig = plt.figure(figsize=(9,4)) plot_psychometric_data(df,2) plt.show() fig = plt.figure(figsize=(9,4)) plot_psychometric_data(df,15) plt.show() ``` **The psychometric function model:** We define now the `basic` psychometric function (descriptive) model and a plotting function. 
``` def psychofun(theta,stim): """Psychometric function based on normal CDF and lapses""" mu = theta[0] # bias sigma = theta[1] # slope/noise lapse = theta[2] # lapse rate if len(theta) == 4: # lapse bias lapse_bias = theta[3]; else: lapse_bias = 0.5 # if theta has only three elements, assume symmetric lapses p_right = norm.cdf(stim,loc=mu,scale=sigma) # Probability of responding "rightwards", without lapses p_right = lapse*lapse_bias + (1-lapse)*p_right # Adding lapses return p_right def psychofun_plot(theta,ax): """Plot psychometric function""" stim = np.linspace(-100,100,201) # Create stimulus grid for plotting p_right = psychofun(theta,stim) # Compute psychometric function values ax.plot(stim,p_right,label='model') ax.legend() return ``` Now try plotting the psychometric function for different values of the parameters (use both the symmetric and asymmetric psychometric function). Try and match the data from one of the sessions. ``` theta0 = (0,50,0.2,0.5) # Arbitrary parameter values - try different ones session_num = 15 fig = plt.figure(figsize=(9,4)) ax = plot_psychometric_data(df,session_num) psychofun_plot(theta0,ax) plt.show() ``` We now define the log likelihood function of the psychometric function model for a given dataset and model parameter vector, $\log p(\text{data}|\mathbf{\theta})$. ``` def psychofun_loglike(theta,df): """Log-likelihood for psychometric function model""" s_vec = df['signed_contrast'] # Stimulus values r_vec = df['response_choice'] # Responses p_right = psychofun(theta,s_vec) # Compute summed log likelihood for all rightwards and leftwards responses loglike = np.sum(np.log(p_right[r_vec == 1])) + np.sum(np.log(1 - p_right[r_vec == -1])) return loglike ``` Now try to get the best fit for this session, as we did before, but by finding better and better values of the log-likelihood. ``` session_num = 14 # Let's use a different session theta0 = (0,25,0.1,0.5) ll = psychofun_loglike(theta0,df[df['session_num'] == session_num]) print('Log-likelihood value: ' + "{:.3f}".format(ll)) fig = plt.figure(figsize=(9,4)) ax = plot_psychometric_data(df,session_num) psychofun_plot(theta0,ax) plt.show() ``` **Maximum-likelihood estimation:** In this section, we are going to estimate model parameters (aka fit our models) by maximizing the log-likelihood. By convention in optimization, we are going to *minimize* the negative log-likelihood. Before running the optimization, we define the *hard* lower and upper bounds for the parameters. If the optimization algorithm supports constrained (bound) optimization, it will never go outside the hard bounds. We also define informally the *plausible* bounds as the range of parameters that we would expect to see. We are going to use the plausible range to initialize the problem later. ``` # Define hard parameter bounds lb = np.array([-100,0.5,0,0]) ub = np.array([100,200,1,1]) bounds = [lb,ub] # Define plausible range plb = np.array([-25,5,0.05,0.2]) pub = np.array([25,25,0.40,0.8]) # Pick session data session_num = 14 df_session = df[df['session_num'] == session_num] # Define objective function: negative log-likelihood opt_fun = lambda theta_: -psychofun_loglike(theta_,df_session) ``` We are now going to run a *black-box* optimization algorithm called CMA-ES. For now we are going to run the optimization only once, but in general you should *always* run the optimization from multiple distinct starting points. 
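As a hedged sketch of that multi-start advice (reusing `opt_fun`, `bounds`, `plb` and `pub` defined above; the number of restarts is an arbitrary choice), one could keep the best of several runs — the tutorial's single run follows right after.

```
# Multi-start sketch (not part of the tutorial): keep the run with the lowest
# negative log-likelihood across several random starting points.
best_res = None
for _ in range(5):  # number of restarts is arbitrary
    theta0 = np.random.uniform(low=plb, high=pub)
    opts = cma.CMAOptions()
    opts.set("bounds", bounds)
    opts.set("tolfun", 1e-5)
    res_i = cma.fmin(opt_fun, theta0, 0.5, opts)
    if best_res is None or res_i[1] < best_res[1]:
        best_res = res_i
```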
``` # Generate random starting point for the optimization inside the plausible box theta0 = np.random.uniform(low=plb,high=pub) # Initialize CMA-ES algorithm opts = cma.CMAOptions() opts.set("bounds",bounds) opts.set("tolfun",1e-5) # Run optimization res = cma.fmin(opt_fun, theta0, 0.5, opts) print('') print('Returned parameter vector: ' + str(res[0])) print('Negative log-likelihood at solution: ' + str(res[1])) fig = plt.figure(figsize=(9,4)) ax = plot_psychometric_data(df_session,session_num) psychofun_plot(res[0],ax) plt.show() ``` **Model comparison:** We consider now a slightly more advanced model which includes time dependency by having the response in the current trial being influenced by the response in the previous trial. We adopt a simple model, `repeatlast`, in which the observer has a fixed chance of repeating the previous choice. ``` def psychofun_repeatlast_loglike(theta,df): """Log-likelihood for last-choice dependent psychometric function model""" s_vec = np.array(df['signed_contrast']) # Stimulus values r_vec = np.array(df['response_choice']) # Responses p_last = theta[0] # Probability of responding as last choice theta_psy = theta[1:] # Standard psychometric function parameters p_right = psychofun(theta_psy,s_vec) # Starting from the 2nd trial, probability of responding equal to the last trial p_right[1:] = p_last*(r_vec[0:-1] == 1) + (1-p_last)*p_right[1:] # Compute summed log likelihood for all rightwards and leftwards responses loglike = np.sum(np.log(p_right[r_vec == 1])) + np.sum(np.log(1 - p_right[r_vec == -1])) return loglike lb = np.array([0,-100,1,0,0]) ub = np.array([1,100,100,1,1]) bounds = [lb,ub] plb = np.array([0.05,-25,5,0.05,0.2]) pub = np.array([0.2,25,25,0.45,0.8]) df_session = df[df['session_num'] == session_num] # df_session = df[(df['session_num'] == session_num) & (df['trial_num'] > 300)] opt_fun = lambda theta_: -psychofun_repeatlast_loglike(theta_,df_session) theta0 = np.random.uniform(low=plb,high=pub) opts = cma.CMAOptions() opts.set("bounds",bounds) opts.set("tolfun",1e-5) res_repeatlast = cma.fmin(opt_fun, theta0, 0.5, opts) print('') print('Returned parameter vector: ' + str(res_repeatlast[0])) print('Negative log-likelihood at solution: ' + str(res_repeatlast[1])) fig = plt.figure(figsize=(9,4)) ax = plot_psychometric_data(df_session,session_num) #psychofun_plot(res[0],ax) plt.show() ``` We now calculate a few model simple comparison metrics, such as AIC and BIC, for the `basic` and `repeatlast` models. ``` Nmodels = 2 nll = np.zeros(Nmodels) nparams = np.zeros(Nmodels) results = [res,res_repeatlast] # Store all optimization output in a vector for i in range(0,len(results)): nll[i] = results[i][1] # The optimization algorithm received the *negative* log-likelihood nparams[i] = len(results[i][0]) ntrials = len(df['signed_contrast']) aic = 2*nll + 2*nparams bic = 2*nll + nparams*np.log(ntrials) print('Model comparison results (for all metrics, lower is better)\n') print('Negative log-likelihoods: ' + str(nll)) print('AIC: ' + str(aic)) print('BIC: ' + str(bic)) ``` **[Advanced] Optional model:** We consider next a more advanced model which includes explicit time dependency (the trials are not all the same), also known as *non-stationarity*. Note that this function is not coded very efficiently and runs quite slowly due to the `for` loop - it could be improved with vectorization. 
``` def psychofun_timevarying_loglike(theta,df): """Log-likelihood for time-varying psychometric function model""" s_vec = np.array(df['signed_contrast']) # Stimulus values r_vec = np.array(df['response_choice']) # Responses Ntrials = len(s_vec) mu_vec = np.linspace(theta[0],theta[4],Ntrials) sigma_vec = np.linspace(theta[1],theta[5],Ntrials) lapse_vec = np.linspace(theta[2],theta[6],Ntrials) lapsebias_vec = np.linspace(theta[3],theta[7],Ntrials) p_right = np.zeros(Ntrials) for t in range(0,Ntrials): p_right[t] = psychofun([mu_vec[t],sigma_vec[t],lapse_vec[t],lapsebias_vec[t]],s_vec[t]) # Compute summed log likelihood for all rightwards and leftwards responses loglike = np.sum(np.log(p_right[r_vec == 1])) + np.sum(np.log(1 - p_right[r_vec == -1])) return loglike theta0 = (0,20,0.1,0.5,1,20,0.1,0.5) ll = psychofun_timevarying_loglike(theta0,df[df['session_num'] == session_num]) lb = np.array([-100,1,0,0,-100,1,0,0]) ub = np.array([100,100,1,1,100,100,1,1]) bounds = [lb,ub] plb = np.array([-25,5,0.05,0.2,-25,5,0.05,0.2]) pub = np.array([25,25,0.45,0.8,25,25,0.45,0.8]) session_num = 14 df_session = df[df['session_num'] == session_num] # df_session = df[(df['session_num'] == session_num) & (df['trial_num'] > 300)] opt_fun = lambda theta_: -psychofun_timevarying_loglike(theta_,df_session) theta0 = np.random.uniform(low=plb,high=pub) opts = cma.CMAOptions() opts.set("bounds",bounds) opts.set("tolfun",1e-5) res_time = cma.fmin(opt_fun, theta0, 0.5, opts) print('') print('Returned parameter vector: ' + str(res_time[0])) print('Negative log-likelihood at solution: ' + str(res_time[1])) fig = plt.figure(figsize=(9,4)) ax = plot_psychometric_data(df_session,session_num) #psychofun_plot(res[0],ax) plt.show() ```
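The note above points out that the per-trial Python loop is slow. Since `scipy.stats.norm.cdf` broadcasts over per-trial `loc` and `scale`, the same computation can be vectorized; the version below is a sketch equivalent to `psychofun_timevarying_loglike`, not part of the original tutorial.

```
def psychofun_timevarying_loglike_vec(theta, df):
    """Vectorized sketch of the time-varying log-likelihood (same maths, no loop)"""
    s_vec = np.array(df['signed_contrast'])
    r_vec = np.array(df['response_choice'])
    Ntrials = len(s_vec)

    mu_vec = np.linspace(theta[0], theta[4], Ntrials)
    sigma_vec = np.linspace(theta[1], theta[5], Ntrials)
    lapse_vec = np.linspace(theta[2], theta[6], Ntrials)
    lapsebias_vec = np.linspace(theta[3], theta[7], Ntrials)

    # norm.cdf broadcasts elementwise over the per-trial loc/scale vectors
    p_right = norm.cdf(s_vec, loc=mu_vec, scale=sigma_vec)
    p_right = lapse_vec * lapsebias_vec + (1 - lapse_vec) * p_right

    return np.sum(np.log(p_right[r_vec == 1])) + np.sum(np.log(1 - p_right[r_vec == -1]))
```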
github_jupyter
``` # bem: triangulation and fmm/bem electrostatics tools # # Copyright (C) 2011-2012 Robert Jordens <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. ``` # `bem` 3D electrostatics example ``` import sys import logging, os from time import time import numpy as np import matplotlib.pyplot as plt sys.path.append('../../') sys.path.append('../../../electrode/') from bem import Electrodes, Sphere, Mesh, Grid, Configuration, Result from bem.formats import stl # base file name for outputs and inputs is the script name try: # works only if we are a script prefix = os.path.splitext(__file__)[0] except NameError: # fallback for notebooks prefix = "SimpleTrap" # scale to natural units (ion height) scale = 40e-6 use_stl = True if not use_stl: # load electrode faces from loops ele = Electrodes.from_trap(open("%s.ele" % prefix), scale) # initial triangulation, area 20, quiet mesh = Mesh.from_electrodes(ele) mesh.triangulate(opts="qa10Q") else: # load electrode faces from colored stl s = stl.read_stl(open("%s.stl" % prefix, "rb")) mesh = Mesh.from_mesh(stl.stl_to_mesh(*s, scale=scale/1e-6, rename={9495: "DC1", 17962: "DC3", 18994: "DC5", 18869: "DC2", 20943: "RF", 18129: "DC4"})) def run_job(args): job, grid, prefix = args # refine twice adaptively with increasing number of triangles, min # angle 25deg job.adapt_mesh(triangles=4e2, opts="q25Q") job.adapt_mesh(triangles=1e3, opts="q25Q") # solve for charges job.solve_singularities(num_mom=4, num_lev=3) # get potentials and fields result = job.simulate(grid, field=job.name=="RF", num_lev=1) result.to_vtk(prefix) print("finished job %s" % job.name) return job.collect_charges() # set .1 max area within 3 mesh.areas_from_constraints(Sphere(center=np.array([0, 0, 1.]), radius=2, inside=.2, outside=10.)) # retriangulate quality and quiet with areas mesh.triangulate(opts="qQ", new=False) # save base mesh to vtk mesh.to_vtk(prefix) # grid to evalute potential and fields at n, s = 2*10, .1 grid = Grid(center=(0, 0, 1.5), step=(s, s, s), shape=(n, n, n)) # generate electrode potential configurations to simulate # use regexps to match electrode names jobs = list(Configuration.select(mesh, "DC.*", "RF")) # run the different electrodes on the parallel pool #pmap = Pool().map # parallel map pmap = map # serial map t0 = time() list(pmap(run_job, ((job, grid, prefix) for job in jobs))) # In python 3, convert map(...) 
to list(map(...)) print("Computing time: %f s"%(time()-t0)) # isocontour plot of the RF pseudopotential radially result = Result.from_vtk(prefix, "RF") p = result.pseudo_potential x = grid.to_mgrid()[:, p.shape[0]//2] # In python 3, use // p = p[p.shape[0]//2] fig, ax = plt.subplots() ax.set_aspect("equal") ax.contour(x[1], x[2], p, levels=np.linspace(0, 2e-2, 20), cmap=plt.cm.Reds) fig, ax = plt.subplots(subplot_kw=dict(aspect="equal")) mesh.plot(ax) # explore it in fancy 3D # fire up a mayavi2 window showing base mesh, charges on final mesh # and isosurfaces of the pseudopotential Result.view(prefix, "RF") # need to start the full eventloop for the window. # close it to return control to the notebook from pyface.api import GUI GUI().start_event_loop() from electrode import System, GridElectrode # load the electrostatics results into a electrode.System() s = System() for name in "DC1 DC2 DC3 DC4 DC5 RF".split(): r = Result.from_vtk(prefix, name) e = GridElectrode.from_result(r) e.name = name s.append(e) import scipy.constants as ct l = 40e-6 # length scale o = 100e6*2*np.pi # rf frequency m = 25*ct.atomic_mass # ion mass q = 1*ct.elementary_charge # ion charge rf_scale = s.rf_scale(m,q,l,o) s["RF"].rf = 25. # peak rf voltage method = 'Newton-CG' x0 = s.minimum((0, 0, 1.),method=method) for _ in s.analyze_static(x0, m=m, l=l, o=o, min_method=method): print(_) n = 30 #xyz = np.mgrid[-.1:.1:1j*n, -.1:.1:1j*n, 1.12:2] #xyz = np.mgrid[0:1, -.02:.02:1j*n, .5:1.5:1j*n] xyz = grid.to_mgrid() p = s.potential(xyz.reshape(3, -1).T, 0).reshape(xyz[0].shape) v = np.linspace(0, 2e-2, 21) fig, ax = plt.subplots() ax.set_aspect("equal") ax.contour(xyz[1, 10, :, :], xyz[2, 10, :, :], p[10, :, :], v, cmap=plt.cm.Reds_r) ```
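The commented-out parallel map above presumably refers to `multiprocessing.Pool` (an assumption on my part); with a context manager, and assuming the job objects can be pickled, the parallel version of the loop would look roughly like this:

```
from multiprocessing import Pool

# Parallel variant of the job loop above (sketch): assumes run_job, jobs,
# grid and prefix are defined as in the cells above.
with Pool() as pool:
    charges = list(pool.map(run_job, [(job, grid, prefix) for job in jobs]))
```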
github_jupyter
# Running cell2location on NanostringWTA data In this notebook we we map fetal brain cell types to regions of interest (ROIs) profiled with the NanostringWTA technology, using a version of our cell2location method recommended for probe based spatial transcriptomics data. This notebook should be read after looking at the main cell2location notebooks. Load the required modules and configure theano settings: ``` import sys,os import pickle import anndata import pandas as pd import numpy as np import matplotlib.pyplot as plt import scanpy as sc from IPython.display import Image data_type = 'float32' os.environ["THEANO_FLAGS"] = 'device=cuda,floatX=' + data_type + ',force_device=True' + ',dnn.enabled=False' import cell2location # Download data: os.mkdir('./data') os.system('cd ./data && wget https://cell2location.cog.sanger.ac.uk/nanostringWTA/nanostringWTA_fetailBrain_AnnData_smallestExample.p') os.system('cd ./data && wget https://cell2location.cog.sanger.ac.uk/nanostringWTA/polioudakis2019_meanExpressionProfiles.csv') ``` Load a data: ``` adata_wta = pickle.load(open("data/nanostringWTA_fetailBrain_AnnData_smallestExample.p", "rb" )) ``` In this NanostringWTA run we profiled 288 regions of interest (ROIs) spanning the full depth of the cortex and at both 19pcw and 14pcw. An example is shown in the image below, together with the cell types we expect in each region: ``` Image(filename='../images/GeometricROIs.PNG') ``` Here we load an existing matrix of average gene expression profiles for each cell type expected in our nanostringWTA data (taken from the single-cell RNAseq study Polioudakis et al., Neuron, 2019): ``` meanExpression_sc = pd.read_csv("data/polioudakis2019_meanExpressionProfiles.csv", index_col=0) ``` We need seperate gene probes and negative probes and if available we can also supply nuclei counts. We initialize all of those here: ``` counts_negativeProbes = np.asarray(adata_wta[:,np.array(adata_wta.var_names =='NegProbe-WTX').squeeze()].X) counts_nuclei = np.asarray(adata_wta.obs['nuclei']).reshape(len(adata_wta.obs['nuclei']),1) adata_wta = adata_wta[:,np.array(adata_wta.var_names != 'NegProbe-WTX').squeeze()] ``` As you can see the nuclei counts and negative probes need to be numpy arrays, but the gene probe counts are supplied as an AnnData object. ``` adata_wta.raw = adata_wta ``` Run cell2location: Explanations of the arguments are as follows: <br> 'model_name = cell2location.models.LocationModelWTA' > Here we tell cell2location to use the NanostringWTA model, rather than the standard model. 
<br> 'use_raw': False > extract the data from adata_wta.X and not from adata_wta.raw.X <br> 'Y_data': counts_negativeProbes > we supply the negative probe information here <br> 'cell_number_prior' and 'cell_number_var_prior': we supply information about nuclei counts here <br> ``` cell2location.run_c2l.run_cell2location(meanExpression_sc, adata_wta, model_name=cell2location.models.LocationModelWTA, train_args={'use_raw': False}, model_kwargs={ "Y_data" : counts_negativeProbes, "cell_number_prior" : {'cells_per_spot': counts_nuclei, 'factors_per_spot': 6, 'combs_per_spot': 3}, "cell_number_var_prior" : {'cells_mean_var_ratio': 1, 'factors_mean_var_ratio': 1, 'combs_mean_var_ratio': 1}}) ``` An anndata object that has the cell2location results included is saved and can be used for further analysis, as in standard cell2location: ``` adata_c2l = sc.read_h5ad('resultsLocationModelWTA_1experiments_16clusters_288locations_15124genes/sp.h5ad') adata_c2l.obs.loc[:,['mean_spot_factors' in c for c in adata_c2l.obs.columns]] ``` We can also plot the same QC metrics as in standard cell2location: ``` from IPython.display import Image Image(filename='resultsLocationModelWTA_1experiments_16clusters_288locations_15124genes/plots/training_history_without_first_20perc.png', width=400) Image(filename='resultsLocationModelWTA_1experiments_16clusters_288locations_15124genes/plots/data_vs_posterior_mean.png', width=400) Image(filename='resultsLocationModelWTA_1experiments_16clusters_288locations_15124genes/plots/evaluate_stability.png', width=400) ``` Finally we have this method to plot cell type abundance in 1D across our coordinate of interest (cortical depth): It makes most sense to plot a subset, for example on one 19pcw slide at one position: ``` colourCode = {'SPN': 'salmon', 'End': 'darkcyan', 'ExDp1': 'deepskyblue', 'ExDp2': 'blue', 'ExM': 'gold', 'ExM-U': 'yellow', 'ExN': 'darkorange', 'InCGE': 'darkgrey', 'InMGE': 'dimgray', 'IP': 'darkviolet', 'Mic': 'indianred', 'OPC': 'lightcoral', 'oRG': 'red', 'Per': 'darkgreen', 'PgG2M': 'rebeccapurple', 'PgS': 'violet', 'vRG': 'lightgreen'} subset_19pcw = [adata_c2l.obs['slide'].iloc[i] == '00MU' and adata_c2l.obs['Radial_position'].iloc[i] == 2 for i in range(len(adata_c2l.obs['Radial_position']))] cell2location.plt.plot_absolute_abundances_1D(adata_c2l, subset = subset_19pcw, saving = False, scaling = 0.15, power = 1, pws = [0,0,100,500,1000,3000,6000], figureSize = (12,8), dimName = 'VCDepth', xlab = 'Cortical Depth', colourCode = colourCode) ``` You can also plot density rather than total numbers: ``` cell2location.plt.plot_density_1D(adata_c2l, subset = subset_19pcw, saving = False, scaling = 0.05, power = 1, pws = [0,0,100,500,1000,3000,6000,10000], figureSize = (12,8), dimName = 'VCDepth', areaName = 'roi_dimension', xlab = 'Cortical Depth', colourCode = colourCode) ```
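A small follow-on that can be useful (a sketch, not part of the cell2location workflow shown here): the `mean_spot_factors` columns selected earlier can be normalised per ROI with plain pandas to give cell-type proportions rather than absolute abundances.

```
# Convert absolute abundances to per-ROI proportions (sketch)
abund = adata_c2l.obs.loc[:, [c for c in adata_c2l.obs.columns if 'mean_spot_factors' in c]]
proportions = abund.div(abund.sum(axis=1), axis=0)
proportions.head()
```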
github_jupyter
``` # hide %load_ext autoreload from nbdev import * # default_exp annotate ``` # Annotate > Tools to support creating and process annotation for samples of Newspaper Navigator data using Label Studio ``` # hide from nbdev.showdoc import * # export from nnanno.core import * # export from tqdm.notebook import trange, tqdm from toolz.itertoolz import count import pandas as pd from pandas import json_normalize import simplejson as json import requests import re from datetime import datetime from glob import glob from pathlib import Path # export import nnanno from typing import Union, Optional, Type ``` ## Annotating Newspaper Navigator data Once you have created a sample of Newspaper Navigator data using `sample`, you might want to annotate it somehow. These annotations may function as the input for a machine learning model or could be used directly to explore images in the newspaper navigator data. The `Examples` section in the documentation shows how annotations can generate training data for machine learning tasks. ## Setup annotation task The bulk of annotation work is outsourced to label studio, which provides a flexible annotations system that supports annotations for various data types, including images and text. This module does a few steps to help process annotations produced through label studio. This module is essentially some suggestions on how you can get label-studio setup with data from Newspaper Navigator. First, we'll create a small sample of images we want to annotate using `sample`. If you have already done this step, you can skip this. ``` # export from nnanno.sample import * sampler = nnSampler() df = sampler.create_sample( 50, "photos", start_year=1910, end_year=1920, year_sample=False ) ``` There are a few ways in which we can use label studio to annotate. For example, we could download images from our sample using `sample.download_sample`. However, if we have a large sample of images, we might want to do some annotating before downloading all of these images locally. Label-studio supports annotating from a URL. We can use this combined with IIIF to annotate images without downloading them all first since IIIF is a flexible interface for getting images. IIIF also gives us flexibility in annotating at a smaller resolution/size before downloading higher-res images. ## Create label studio annotation tasks Label-studio supports a load of different ways of setting up 'tasks'. In this context, a 'task' is an image to be annotated. One way of setting up a task is to import a `JSON` file that includes tasks. To do this, we take an existing sample DataFrame and add column `image`, which contains a IIIF URL. 
``` # export def create_label_studio_json( sample: Union[pd.DataFrame, Type[nnSampler]], fname: Union[str, Path, None] = None, original: bool = True, pct: Optional[int] = None, size: Optional[tuple] = None, preserve_asp_ratio: bool = True, ): """create a json file which can be used to upload tasks to label studio""" if fname and Path(fname).exists(): raise FileExistsError(f"{fname} already exists") if fname is None: today = datetime.today() time_stamp = today.strftime("%Y_%d_%m_%H_%M") fname = f"{time_stamp}_tasks.json" if type(sample) == nnanno.sample.nnSampler: try: sample = sample.sample.copy() except AttributeError as e: print(f"{sample} doesn't have a sample associated with it") else: sample = sample.copy() sample["image"] = sample.apply( lambda x: iiif_df_apply( x, original=original, pct=pct, size=size, preserve_asp_ratio=preserve_asp_ratio, ), axis=1, ) label_studio_json = sample.apply(lambda x: x.to_dict(), axis=1).to_list() with open(fname, "w") as f: json.dump(label_studio_json, f, ignore_nan=True) ``` We can pass in either a dataframe or `nnSampler` to `create_label_studio_json`. This is a simple function that will create a `JSON` file that can create 'tasks' in labels studio. In this example, we pass in size parameters. This is used to generate a IIIF URL that will request this size. ``` create_label_studio_json(df, "tasks.json", size=(500, 500)) # hide Path("tasks.json").unlink() ``` This creates a `JSON` file we can use to load tasks into label-studio. ### Importing tasks into label studio To avoid this documentation becoming out of date, I haven't included screenshots etc. However, you can currently (January 2021) create tasks in label studio via the GUI or by passing in tasks through the CLI. For example, to load the tasks and create a template for annotating classifications ```bash label-studio init project_name --template=image_classification --input-path=tasks.json ``` You can then start label-studio and complete the rest of the setup via the GUI. ```bash label-studio start ./project_name ``` ## Setting up labeling For a proper introduction to configuring your labels, consult the label studio [documentation](https://labelstud.io/guide/). One way in which you can setup labels is to use a template as shown above. This template setups an image classification task. There are other [templates](https://labelstud.io/templates/) for different tasks. These templates consist of `XML` templates that define your labels. These templates allow you to define how you want to label your images and share these definitions with others. For example ```xml <View> <Choices name="choice" toName="image" showInLine="true" choice="multiple"> <Choice value="human"/> <Choice value="animal"/> <Choice value="human-structure"/> <Choice value="landscape"/> </Choices> <Image name="image" value="$image"/> </View> ``` You can change many other options in Label-studio. It also includes features such as adding a machine learning backend to support annotations. ### Notes on labelling using IIIF images There are a few things to consider and be aware of when loading images via IIIF in label studio. #### Missing images Occasionally when you are doing your annotations in label studio for IIIF URLs, you will get a missing image error. This is probably because for some reason the IIIF URL has been generated incorrectly for that image, or that image doesn't exist via IIIF. If this happens, you can 'skip' this image in the annotation interface. 
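If missing images become a nuisance, one option (a hypothetical helper, not part of nnanno) is to pre-check each IIIF URL with a `HEAD` request before creating the tasks and drop any rows that do not resolve:

```
# Hypothetical pre-flight check for IIIF URLs (not part of nnanno)
def iiif_url_ok(url, timeout=10):
    try:
        return requests.head(url, timeout=timeout).status_code == 200
    except requests.RequestException:
        return False
```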
#### Setting a comfortable size for viewing You can take advantage of the flexibility of IIIF by requesting images to be a specific size when you create the tasks. This also helps speed up the process of loading each image since we often request a smaller sized image to fit it in on a smallish screen comfortably. #### Annotating vs training image size, resolution etc. IF you are annotating labels or classifications, you may decide to annotate at a smaller size or quality and work with a higher quality image when you come to training a model. If you are doing any annotations of pixels or regions of the image, you will want to be careful to make sure these aren't lost if moving between different sizes of the image. ### Exporting and loading annotations from label studio Label studio supports a broad range of annotation tasks which may require particular export formats i.e. COCO or VOC for object detection. Since the processing of these outputs is tasks specific this module only contains functionality to deal with image classification and labeling tasks since these were the tasks covered in the Programming Historian lessons for which this code was originally written. ### Exporting and processing CSV Once you have finished annotating all your images or got too bored of annotating, you can export in various formats, including JSON and CSV. A CSV export is often sufficient for simple tasks and has the additional benefit of having a lower barrier to entry than JSON for people who aren't coders. We'll now process the annotations we generated above and labeled using label studio ``` # export def process_labels(x): try: x = "|".join(eval(x)["choices"]) except: NameError return x # exports def load_annotations_csv(csv: Union[str, Path], kind="classification"): if kind == "classification": df = pd.read_csv(csv, converters={"box": eval}) df["label"] = df["choice"] return df if kind == "label": df = pd.read_csv(csv, converters={"box": eval}) df["label"] = df["choice"].apply(process_labels) return df ``` As you can see above, this code doesn't do much to process the annotations into a DataFrame. The main things to note are the `kind` parameter. The CSV export for labelling tasks includes a column that contains a JSON with the labels. In this case, we use a pandas converter and `eval` and grab the choices, which returns a list of labels. If we look at the columns from the annotation DataFrame we'll see that label studio kept the original metadata. We now have a new column `label` that contains our annotations. We also have a column `choice` containing the original column format from the label studio export, which will be different from the `label` column when processing labelling annotations. ``` annotation_df = load_annotations_csv("test_iiif_anno/label_studio_export.csv") annotation_df.columns # hide assert "choice" in annotation_df.columns ``` We can now do the usual Pandas things to start exploring our annotations further. For example we can see how many of each label option we have ``` annotation_df["choice"].value_counts() ``` ### Downloading the images associated with annotations Once we have some annotations done, we'll often want to get the original images to work locally. This is particularly important if we are planning to train a machine learning model with these images. Although it is possible to train a model using the images from IIIF, since we'll usually be grabbing these images multiple times for each epoch, this isn't particularly efficient and isn't very friendly to the IIIF endpoint. 
We can use the `sampler.download_sample` method to download our sample; we just pass in our annotation DataFrame a folder we want to download images to and an optional name to save our 'log' of the download. We can also pass in different parameters to request different size etc. of the image. See the `download_sample` docs for more details. ``` sampler.download_sample( "test_iiif_anno/test_dl", df=annotation_df, original=True, json_name="test_dl" ) # hide # test we have a very similar number of images downloaded and in our annotation dataframe # allow for some images to be missing images = list(Path("test_iiif_anno/test_dl").rglob("*.jpg")) test_close(len(images), len(annotation_df), eps=1) ``` ### Moving between local annotation and the cloud ☁ Although 'storage is cheap', it isn't free. One helpful feature of the IIIF annotations workflow is that it allows you to annotate 'locally,' i.e. on a personal computer and then quickly move the information required to download all the images into the cloud without having to pass the images themselves around. This is particularly useful if you will use a service like Google Colab to train a computer vision model, i.e. you don't have the resources to rent GPUs. In the context of working with limited bandwidth, it might also be relatively time-consuming to download a large set of images. However, it might be feasible to get around this by annotating using the IIIF images and then using a service like google Colab when you want to grab the actual images files. Since Colab is running in the cloud with a big internet tube, this should be much more doable even if your internet is limited. Once you have download your images you may want to check if any images weren't able to download. You can do this using the `check_download_df_match` function. ``` # export def check_download_df_match(dl_folder: Union[Path, str], df: pd.DataFrame) -> str: im_count = count( f for f in Path(dl_folder).iterdir() if f.suffix in image_extensions ) if type(df) == pd.core.frame.DataFrame: if len(df) == im_count: print( f"Length of DataFrame {len(df)} and number of images in {dl_folder} {im_count} match", "\U0001F600", ) if len(df) != im_count: print( f"Length of DataFrame {len(df)} and number of images in {dl_folder} {im_count} do not match", "\U0001F615", ) ``` This will let you know if you have a different number of downloaded images compared to the number of rows in the DataFrame. ``` check_download_df_match("test_iiif_anno/test_dl", annotation_df) ``` ## Working with the annotations This will really depend on the framework or library you want to use. In fastai the process is simple since our data matches one of the fastai 'factory' methods for loading data. ### Loading with fastai ``` # slow from fastai.vision.all import * # slow df = pd.read_json("test_iiif_anno/test_dl/test_dl.json") # slow dls = ImageDataLoaders.from_df( df, path="test_iiif_anno/test_dl", fn_col="download_image_path", label_col="choice", item_tfms=Resize(64), bs=4, ) # slow dls.show_batch() # hide [f.unlink() for f in Path("test_iiif_anno/test_dl").iterdir()] Path("test_iiif_anno/test_dl").rmdir() ``` ## Process completions directly Label studio stores annotations as json files so we can work with these directly without using the exports from label studio. This code below shows how to do this but the above approach is likely to be more reliable. 
``` # export def load_df(json_file: Union[str, Path]): with open(json_file) as f: data = json.load(f) df = json_normalize(data, record_path=["completions"], meta=["data"]) # df['result'] = df['result'].apply(lambda x: return_choice(x[0]) if len([x][0]) ==1 else x) df["result"] = df["result"].apply( lambda x: x[0]["value"]["choices"] if len([x][0]) == 1 else x ) return df # export def load_completions(path: Union[str, Path]): filenames = glob(f"{path}/completions/*.json") dataframes = [load_df(f) for f in filenames] return pd.concat(dataframes) # slow df = load_completions("../ph/ads/ad_annotations/") df.head(1) # slow # df = load_completions('../ph/photos/multi_label/') # df.head(1) # exporti def _df_to_csv(df, out_fn): df[["data", "result"]].to_csv( out_fn, header=[ "file", "label", ], index=False, ) # exporti def _df_to_json(df, out_fn): df[["data", "value.choices"]].to_json(out_fn) # exporti def _df_to_pkl(df, out_fn): df.to_pickle(out_fn) # exporti def get_og_filepath(x): """ Transforms a filepaths from processed ImageStudio format back to the Orginal Newspaper Navigator filepath format """ b, m, e = re.split("(_data_)", x) m = m.replace("_", "/") e = re.split("(\d{3}_\d{1}_\d{2}.jpg)", e) return b + m + e[0].replace("_", "/") + e[1] # export def anno_sample_merge( sample_df: pd.DataFrame, annotation_df: pd.DataFrame ) -> pd.DataFrame: """anno_sample_merge merges a DataFrame containing a sample from Newspaper Navigator and a DataFrame containing annotations Parameters ---------- sample_df : pd.DataFrame A Pandas DataFrame which holds a sample from Newspaper Navigator Generated by `sample.nnSample()` annotation_df : pd.DataFrame A pandas DataFrame containing annotations loaded via the `annotate.nnAnnotations` class Returns ------- pd.DataFrame A new DataFrame which merges the two input DataFrames """ sample_df, annotation_df = sample_df.copy(), annotation_df.copy() annotation_df["id"] = annotation_df["data"].map(lambda x: get_og_filepath(x)) return sample_df.merge(annotation_df, left_on="filepath", right_on="id") sample_df = pd.read_csv("../ph/ads/sample.csv", index_col=0) # export class nnAnnotations: def __init__(self, df): self.annotation_df = df self.labels = df["result"].unique() self.label_counts = df["result"].value_counts() def __repr__(self): return f"{self.__class__.__name__}" f" #annotations:{len(self.annotation_df)}" @classmethod def from_completions(cls, path, kind, drop_dupes=True, sample_df=None): df = load_completions(path) df = df.reset_index(drop=True) # add index df["data"] = df["data"].map(lambda x: x["image"]) df["data"] = df["data"].map(lambda x: x.split("?")[0]) df["data"] = df["data"].apply(lambda x: Path(x).name) if any( df["data"].str.contains("-") ): # removes labelstudio hash from data loaded via web interface df["data"] = df["data"].str.split("-", expand=True)[1] if drop_dupes: df = df.drop_duplicates(subset="data", keep="last") if kind == "classification": empty_rows = df[df["result"].apply(lambda x: len(x) == 0)].index df = df.drop(empty_rows) df["result"] = df["result"].map(lambda x: x[0]) if kind == "label": df["result"] = df["result"].map( lambda x: "|".join(map(str, x)) if len(x) >= 1 else x ) df["result"] = df["result"].map(lambda x: "" if len(x) == 0 else x) return cls(df) def merge_sample(self, sample_df): self.merged_df = anno_sample_merge(sample_df, self.annotation_df) def export_merged(self, out_fn): self.merged_df.to_csv(out_fn) def export_annotations(self, out_fn): df = self.annotation_df if not Path(out_fn).exists(): Path(out_fn).touch() 
suffix = Path(out_fn).suffix if suffix == ".csv": _df_to_csv(df, out_fn) if suffix == ".json": _df_to_json(df, out_fn) if suffix == ".pkl": _df_to_pkl(df, out_fn) show_doc(nnAnnotations) show_doc(nnAnnotations.from_completions) annotations = nnAnnotations.from_completions( "../ph/ads/ad_annotations/", "classification" ) annotations annotations.labels annotations.label_counts show_doc(nnAnnotations.merge_sample) annotations.merge_sample(sample_df) annotations.merged_df.head(2) show_doc(nnAnnotations.export_merged) annotations.export_merged("testmerge.csv") show_doc(nnAnnotations.from_completions) # hide Path("testmerge.csv").unlink() annotations = nnAnnotations.from_completions( "../ph/ads/ad_annotations/", "classification" ) annotations.annotation_df.head(2) from nbdev.export import notebook2script notebook2script() ```
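For completeness, a usage sketch (it could sit anywhere after the `annotations` object is created): the annotations themselves, not just the merged DataFrame, can be written out with `export_annotations`, which picks the writer from the file suffix; the file name below is arbitrary.

```
# Arbitrary output name; the .csv suffix selects the CSV writer
annotations.export_annotations("ad_annotations.csv")
```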
github_jupyter
# Scikits DAE solver In this notebook, we show some examples of solving a DAE model using the Scikits DAE solver, which interfaces with the [SUNDIALS](https://computation.llnl.gov/projects/sundials) library via the [scikits-odes](https://scikits-odes.readthedocs.io/en/latest/) Python interface ``` # Setup import pybamm import tests import numpy as np import os import matplotlib.pyplot as plt from pprint import pprint os.chdir(pybamm.__path__[0]+'/..') # Create solver dae_solver = pybamm.ScikitsDaeSolver() ``` ## Integrating DAEs In the simplest case, the `integrate` method of the DAE solver needs to be passed a function that returns residuals given `(t,y,ydot)`, initial conditions `y0`, and a time `t_eval` at which to return the solution: ``` def exponential_decay_dae(t, y, ydot): return [-y[0] - ydot[0], 2 * y[0] - y[1]] # Solve y0 = np.array([1, 2]) t_eval = np.linspace(0, 5, 20) solution = dae_solver.integrate(exponential_decay_dae, y0, t_eval) # Plot t_fine = np.linspace(0,t_eval[-1],1000) def plot(t_sol, y_sol): fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(13,4)) ax1.plot(t_fine, np.exp(-t_fine), t_sol, y_sol[0], "o") ax1.set_xlabel("t") ax1.legend(["exp(-t)", "y_sol[0]"], loc="best") ax2.plot(t_fine, 2*np.exp(-t_fine), t_sol, y_sol[1], "o") ax2.set_xlabel("t") ax2.legend(["2*exp(-t)", "y_sol[1]"], loc="best") plt.tight_layout() plt.show() plot(solution.t, solution.y) ``` We can also provide the mass matrix and Jacobian (both must be provided together) ``` def jacobian(t, y): return np.array([[-1.0, 0.0], [2, -1]]) mass_matrix = np.array([[1, 0], [0, 0]]) solution = dae_solver.integrate(exponential_decay_dae, y0, t_eval, jacobian=jacobian, mass_matrix=mass_matrix) plot(solution.t, solution.y) ``` Finally, we can specify events at which the solver should terminate ``` def y1_equal_0pt2(t, y): return y[1] - 0.2 # Solve solution = dae_solver.integrate(exponential_decay_dae, y0, t_eval, events=[y1_equal_0pt2]) # Plot fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(13,4)) ax1.plot(t_fine, np.exp(-t_fine), solution.t, solution.y[0], "o") ax1.set_xlabel("t") ax1.legend(["exp(-t)", "y_sol[0]"], loc="best") ax2.plot(t_fine, 2*np.exp(-t_fine), solution.t, solution.y[1], "o", t_fine, 0.2 * np.ones_like(t_fine), "k") ax2.set_xlabel("t") ax2.legend(["2*exp(-t)", "y_sol[1]", "y=0.2"], loc="best") plt.tight_layout() plt.show() ``` ## Finding consistent initial conditions The solver will fail if initial conditions that are inconsistent with the algebraic equations are provided. ``` y0_bad = np.array([1, 3]) print("algebraic residual at (0, y0_bad, [0]) is {}".format(exponential_decay_dae(0, y0_bad, [0])[1])) try: solution = dae_solver.integrate(exponential_decay_dae, y0_bad, t_eval) except pybamm.SolverError as e: print(e) ``` However, we can use `calculate_consistent_initial_conditions` to obtain consistent initial conditions, starting from a guess of bad initial conditions, using a simple root-finding algorithm. 
``` def exponential_decay_dae_rhs(t, y): return np.array([exponential_decay_dae(t, y, [0])[0]]) def exponential_decay_dae_algebraic(t, y): return np.array([exponential_decay_dae(t, y, [0])[1]]) y0_fixed = dae_solver.calculate_consistent_initial_conditions( exponential_decay_dae_rhs, exponential_decay_dae_algebraic, y0_bad ) print("y0_fixed = {}\n".format(y0_fixed)) print("algebraic residual at (0, y0_fixed, [0]) is {}".format(exponential_decay_dae(0, y0_fixed, [0])[1])) solution = dae_solver.integrate(exponential_decay_dae, y0_fixed, t_eval) plot(solution.t, solution.y) ``` ## Solving a model The `solve` method is common to all DAE solvers. It takes a model, which contains all of the above information (residuals function, initial conditions and optionally jacobian, mass matrix, events), and a time to evaluate `t_eval`, and calls `integrate` to solve this model. ``` # Create model model = pybamm.BaseModel() u = pybamm.Variable("u") v = pybamm.Variable("v") model.rhs = {u: -v} # du/dt = -v model.algebraic = {v: 2 * u - v} # 2*v = u model.initial_conditions = {u: 1, v: 1} # bad initial conditions, solver fixes model.events['v=0.2'] = v - 0.2 model.variables = {"u": u, "v": v} # Discretise using default discretisation disc = pybamm.Discretisation() disc.process_model(model) # Solve ################################# t_eval = np.linspace(0, 2, 30) solution = dae_solver.solve(model, t_eval) ######################################### # Post-process, so that u and v can be called at any time t (using interpolation) t_sol, y_sol = solution.t, solution.y u = pybamm.ProcessedVariable(model.variables["u"], t_sol, y_sol) v = pybamm.ProcessedVariable(model.variables["v"], t_sol, y_sol) # Plot t_fine = np.linspace(0,t_eval[-1],1000) fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(13,4)) ax1.plot(t_fine, np.exp(-2 * t_fine), t_sol, u(t_sol), "o") ax1.set_xlabel("t") ax1.legend(["exp(-2*t)", "u"], loc="best") ax2.plot(t_fine, 2 * np.exp(-2 * t_fine), t_sol, v(t_sol), "o", t_fine, 0.2 * np.ones_like(t_fine), "k") ax2.set_xlabel("t") ax2.legend(["2*exp(-2*t)", "v", "v = 0.2"], loc="best") plt.tight_layout() plt.show() ``` Note that the discretisation or solver will have created the mass matrix and jacobian algorithmically, using the expression tree, so we do not need to calculate and input these manually.
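Returning to the consistent-initial-conditions step above: the text describes it as a simple root-finding algorithm over the algebraic equations, and for this toy problem the fix can be reproduced by hand with a generic root finder. The snippet below is a sketch using `scipy.optimize.root`, not the pybamm API.

```
import numpy as np
from scipy import optimize

# Sketch: keep the differential state u = y[0] fixed at its initial value and
# root-find the algebraic state v = y[1] so that the residual 2*u - v vanishes.
u0 = 1.0  # from y0_bad[0]

def algebraic_residual(y_alg):
    return np.array([2 * u0 - y_alg[0]])

sol = optimize.root(algebraic_residual, x0=np.array([3.0]))
print(sol.x)  # ~[2.], matching the consistent value found above
```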
github_jupyter
# This notebook plots metrics throughout training (Supplementary Fig. 3) ``` import os import numpy as np from six.moves import cPickle import matplotlib.pyplot as plt %matplotlib inline from tensorflow import keras import helper from tfomics import utils, explain, metrics num_trials = 10 model_names = ['cnn-deep', 'cnn-2', 'cnn-50'] activations = ['relu', 'exponential', 'sigmoid', 'tanh', 'softplus', 'linear', 'elu', 'shift_scale_relu', 'shift_scale_tanh', 'shift_scale_sigmoid', 'exp_relu'] # save path results_path = utils.make_directory('../../results', 'task1') # load dataset data_path = '../../data/synthetic_dataset.h5' data = helper.load_data(data_path) x_train, y_train, x_valid, y_valid, x_test, y_test = data # pickle results file_path = os.path.join(results_path, "task1_history.pickle") with open(file_path, 'rb') as f: results = cPickle.load(f) num_rows = 5 index = 0 ylabel = 'Training loss' yticks = [0.1, 0.2, 0.3, 0.4, 0.5] ylim = [0.1,.5] fig = plt.figure(figsize=(9,10)) activation = 'relu' history = results['cnn-deep'][activation] ax = plt.subplot(num_rows,3,1); for i in range(10): ax.plot(np.array(history[i])[index,:]); plt.ylabel(ylabel, fontsize=12) plt.title('CNN-deep', fontsize=12) plt.yticks(yticks, fontsize=12) plt.ylim(ylim) plt.xlim([0,100]) ax.set_xticklabels([]) history = results['cnn-2'][activation] ax = plt.subplot(num_rows,3,2) for i in range(10): ax.plot(np.array(history[i])[index,:]); plt.title('CNN-2', fontsize=12) ax.set_yticklabels([]) ax.set_xticklabels([]) plt.xlim([0,100]) plt.ylim(ylim) history = results['cnn-50'][activation] ax = plt.subplot(num_rows,3,3); for i in range(10): ax.plot(np.array(history[i])[index,:]); plt.title('CNN-50', fontsize=12) plt.xlim([0,100]) plt.ylim(ylim) ax.set_yticklabels([]) ax.set_xticklabels([]) ax2 = ax.twinx() ax2.set_yticks([]) ax2.set_ylabel('Relu', fontsize=12) activation = 'exponential' history = results['cnn-deep'][activation] ax = plt.subplot(num_rows,3,4); for i in range(10): ax.plot(np.array(history[i])[index,:]); plt.ylabel(ylabel, fontsize=12) plt.yticks(yticks, fontsize=12) plt.ylim(ylim) ax.set_xticklabels([]) plt.xlim([0,100]) history = results['cnn-2'][activation] ax = plt.subplot(num_rows,3,5) for i in range(10): ax.plot(np.array(history[i])[index,:]); ax.set_xticklabels([]) plt.xlim([0,100]) plt.ylim(ylim) ax.set_yticklabels([]) history = results['cnn-50'][activation] ax = plt.subplot(num_rows,3,6); for i in range(10): ax.plot(np.array(history[i])[index,:]); ax.set_xticklabels([]) plt.xlim([0,100]) plt.ylim(ylim) ax.set_yticklabels([]) ax2 = ax.twinx() ax2.set_yticks([]) ax2.set_ylabel('Exponential', fontsize=12) activation = 'shift_scale_relu' history = results['cnn-deep'][activation] ax = plt.subplot(num_rows,3,7); for i in range(10): ax.plot(np.array(history[i])[index,:]); plt.ylabel(ylabel, fontsize=12) plt.yticks(yticks, fontsize=12) plt.ylim(ylim) ax.set_xticklabels([]) plt.xlim([0,100]) history = results['cnn-2'][activation] ax = plt.subplot(num_rows,3,8) for i in range(10): ax.plot(np.array(history[i])[index,:]); ax.set_xticklabels([]) plt.xlim([0,100]) plt.ylim(ylim) ax.set_yticklabels([]) history = results['cnn-50'][activation] ax = plt.subplot(num_rows,3,9); for i in range(10): ax.plot(np.array(history[i])[index,:]); ax.set_xticklabels([]) plt.xlim([0,100]) plt.ylim(ylim) ax.set_yticklabels([]) ax2 = ax.twinx() ax2.set_yticks([]) ax2.set_ylabel('Modified-Sigmoid', fontsize=12) activation = 'shift_scale_sigmoid' history = results['cnn-deep'][activation] ax = plt.subplot(num_rows,3,10); for i in 
range(10): ax.plot(np.array(history[i])[index,:]); plt.ylabel(ylabel, fontsize=12) plt.yticks(yticks, fontsize=12) plt.ylim(ylim) ax.set_xticklabels([]) plt.xlim([0,100]) history = results['cnn-2'][activation] ax = plt.subplot(num_rows,3,11) for i in range(10): ax.plot(np.array(history[i])[index,:]); ax.set_xticklabels([]) plt.xlim([0,100]) plt.ylim(ylim) ax.set_yticklabels([]) history = results['cnn-50'][activation] ax = plt.subplot(num_rows,3,12); for i in range(10): ax.plot(np.array(history[i])[index,:]); plt.xlim([0,100]) plt.ylim(ylim) ax.set_yticklabels([]) ax.set_xticklabels([]) ax2 = ax.twinx() ax2.set_yticks([]) ax2.set_ylabel('Modified-Relu', fontsize=12) activation = 'shift_scale_tanh' history = results['cnn-deep'][activation] ax = plt.subplot(num_rows,3,13); for i in range(10): ax.plot(np.array(history[i])[index,:]); plt.ylabel(ylabel, fontsize=12) plt.xlabel('Epoch', fontsize=12) plt.yticks(yticks, fontsize=12) plt.ylim(ylim) plt.xticks([0, 25, 50, 75, 100], fontsize=12) plt.xlim([0,100]) history = results['cnn-2'][activation] ax = plt.subplot(num_rows,3,14) for i in range(10): ax.plot(np.array(history[i])[index,:]); plt.xlabel('Epoch', fontsize=12) plt.xticks([0, 25, 50, 75, 100], fontsize=12) plt.xlim([0,100]) plt.ylim(ylim) ax.set_yticklabels([]) history = results['cnn-50'][activation] ax = plt.subplot(num_rows,3,15); for i in range(10): ax.plot(np.array(history[i])[index,:]); plt.xlabel('Epoch', fontsize=12) plt.xticks([0, 25, 50, 75, 100], fontsize=12) plt.xlim([0,100]) plt.ylim(ylim) ax.set_yticklabels([]) ax2 = ax.twinx() ax2.set_yticks([]) ax2.set_ylabel('Modified-Tanh', fontsize=12); outfile = os.path.join(results_path, 'training_loss.pdf') fig.savefig(outfile, format='pdf', dpi=200, bbox_inches='tight') num_rows = 5 index = 3 ylabel = 'Validation loss' yticks = [0.1, 0.2, 0.3, 0.4, 0.5] ylim = [0.1,.5] fig = plt.figure(figsize=(9,10)) activation = 'relu' history = results['cnn-deep'][activation] ax = plt.subplot(num_rows,3,1); for i in range(10): ax.plot(np.array(history[i])[index,:]); plt.ylabel(ylabel, fontsize=12) plt.title('CNN-deep', fontsize=12) plt.yticks(yticks, fontsize=12) plt.ylim(ylim) ax.set_xticklabels([]) plt.xlim([0,100]) history = results['cnn-2'][activation] ax = plt.subplot(num_rows,3,2) for i in range(10): ax.plot(np.array(history[i])[index,:]); plt.title('CNN-2', fontsize=12) ax.set_yticklabels([]) ax.set_xticklabels([]) plt.xlim([0,100]) plt.ylim(ylim) history = results['cnn-50'][activation] ax = plt.subplot(num_rows,3,3); for i in range(10): ax.plot(np.array(history[i])[index,:]); plt.title('CNN-50', fontsize=12) plt.xlim([0,100]) plt.ylim(ylim) ax.set_yticklabels([]) ax.set_xticklabels([]) ax2 = ax.twinx() ax2.set_yticks([]) ax2.set_ylabel('Relu', fontsize=12) activation = 'exponential' history = results['cnn-deep'][activation] ax = plt.subplot(num_rows,3,4); for i in range(10): ax.plot(np.array(history[i])[index,:]); plt.ylabel(ylabel, fontsize=12) plt.yticks(yticks, fontsize=12) plt.ylim(ylim) ax.set_xticklabels([]) plt.xlim([0,100]) history = results['cnn-2'][activation] ax = plt.subplot(num_rows,3,5) for i in range(10): ax.plot(np.array(history[i])[index,:]); ax.set_xticklabels([]) plt.xlim([0,100]) plt.ylim(ylim) ax.set_yticklabels([]) history = results['cnn-50'][activation] ax = plt.subplot(num_rows,3,6); for i in range(10): ax.plot(np.array(history[i])[index,:]); ax.set_xticklabels([]) plt.xlim([0,100]) plt.ylim(ylim) ax.set_yticklabels([]) ax2 = ax.twinx() ax2.set_yticks([]) ax2.set_ylabel('Exponential', fontsize=12) activation = 
'shift_scale_relu' history = results['cnn-deep'][activation] ax = plt.subplot(num_rows,3,7); for i in range(10): ax.plot(np.array(history[i])[index,:]); plt.ylabel(ylabel, fontsize=12) plt.yticks(yticks, fontsize=12) plt.ylim(ylim) ax.set_xticklabels([]) plt.xlim([0,100]) history = results['cnn-2'][activation] ax = plt.subplot(num_rows,3,8) for i in range(10): ax.plot(np.array(history[i])[index,:]); ax.set_xticklabels([]) plt.xlim([0,100]) plt.ylim(ylim) ax.set_yticklabels([]) history = results['cnn-50'][activation] ax = plt.subplot(num_rows,3,9); for i in range(10): ax.plot(np.array(history[i])[index,:]); ax.set_xticklabels([]) plt.xlim([0,100]) plt.ylim(ylim) ax.set_yticklabels([]) ax2 = ax.twinx() ax2.set_yticks([]) ax2.set_ylabel('Modified-Sigmoid', fontsize=12) activation = 'shift_scale_sigmoid' history = results['cnn-deep'][activation] ax = plt.subplot(num_rows,3,10); for i in range(10): ax.plot(np.array(history[i])[index,:]); plt.ylabel(ylabel, fontsize=12) plt.yticks(yticks, fontsize=12) plt.ylim(ylim) ax.set_xticklabels([]) plt.xlim([0,100]) history = results['cnn-2'][activation] ax = plt.subplot(num_rows,3,11) for i in range(10): ax.plot(np.array(history[i])[index,:]); ax.set_xticklabels([]) plt.xlim([0,100]) plt.ylim(ylim) ax.set_yticklabels([]) history = results['cnn-50'][activation] ax = plt.subplot(num_rows,3,12); for i in range(10): ax.plot(np.array(history[i])[index,:]); plt.xlim([0,100]) plt.ylim(ylim) ax.set_yticklabels([]) ax.set_xticklabels([]) ax2 = ax.twinx() ax2.set_yticks([]) ax2.set_ylabel('Modified-Relu', fontsize=12) activation = 'shift_scale_tanh' history = results['cnn-deep'][activation] ax = plt.subplot(num_rows,3,13); for i in range(10): ax.plot(np.array(history[i])[index,:]); plt.ylabel(ylabel, fontsize=12) plt.xlabel('Epoch', fontsize=12) plt.yticks(yticks, fontsize=12) plt.ylim(ylim) plt.xticks([0, 25, 50, 75, 100], fontsize=12) plt.xlim([0,100]) history = results['cnn-2'][activation] ax = plt.subplot(num_rows,3,14) for i in range(10): ax.plot(np.array(history[i])[index,:]); plt.xlabel('Epoch', fontsize=12) plt.xticks([0, 25, 50, 75, 100], fontsize=12) plt.xlim([0,100]) plt.ylim(ylim) ax.set_yticklabels([]) history = results['cnn-50'][activation] ax = plt.subplot(num_rows,3,15); for i in range(10): ax.plot(np.array(history[i])[index,:]); plt.xlabel('Epoch', fontsize=12) plt.xticks([0, 25, 50, 75, 100], fontsize=12) plt.xlim([0,100]) plt.ylim(ylim) ax.set_yticklabels([]) ax2 = ax.twinx() ax2.set_yticks([]) ax2.set_ylabel('Modified-Tanh', fontsize=12); outfile = os.path.join(results_path, 'validation_loss.pdf') fig.savefig(outfile, format='pdf', dpi=200, bbox_inches='tight') num_rows = 5 index = 5 ylabel = 'AUPR' yticks = [0.2, 0.4, 0.6, 0.8, 1.0] ylim = [0.1,1] fig = plt.figure(figsize=(9,10)) activation = 'relu' history = results['cnn-deep'][activation] ax = plt.subplot(num_rows,3,1); for i in range(10): ax.plot(np.array(history[i])[index,:]); plt.ylabel(ylabel, fontsize=12) plt.title('CNN-deep', fontsize=12) plt.yticks(yticks, fontsize=12) plt.ylim(ylim) ax.set_xticklabels([]) plt.xlim([0,100]) history = results['cnn-2'][activation] ax = plt.subplot(num_rows,3,2) for i in range(10): ax.plot(np.array(history[i])[index,:]); plt.title('CNN-2', fontsize=12) ax.set_yticklabels([]) ax.set_xticklabels([]) plt.xlim([0,100]) plt.ylim(ylim) history = results['cnn-50'][activation] ax = plt.subplot(num_rows,3,3); for i in range(10): ax.plot(np.array(history[i])[index,:]); plt.title('CNN-50', fontsize=12) plt.xlim([0,100]) plt.ylim(ylim) ax.set_yticklabels([]) 
ax.set_xticklabels([]) ax2 = ax.twinx() ax2.set_yticks([]) ax2.set_ylabel('Relu', fontsize=12) activation = 'exponential' history = results['cnn-deep'][activation] ax = plt.subplot(num_rows,3,4); for i in range(10): ax.plot(np.array(history[i])[index,:]); plt.ylabel(ylabel, fontsize=12) plt.yticks(yticks, fontsize=12) plt.ylim(ylim) ax.set_xticklabels([]) plt.xlim([0,100]) history = results['cnn-2'][activation] ax = plt.subplot(num_rows,3,5) for i in range(10): ax.plot(np.array(history[i])[index,:]); ax.set_xticklabels([]) plt.xlim([0,100]) plt.ylim(ylim) ax.set_yticklabels([]) history = results['cnn-50'][activation] ax = plt.subplot(num_rows,3,6); for i in range(10): ax.plot(np.array(history[i])[index,:]); ax.set_xticklabels([]) plt.xlim([0,100]) plt.ylim(ylim) ax.set_yticklabels([]) ax2 = ax.twinx() ax2.set_yticks([]) ax2.set_ylabel('Exponential', fontsize=12) activation = 'shift_scale_relu' history = results['cnn-deep'][activation] ax = plt.subplot(num_rows,3,7); for i in range(10): ax.plot(np.array(history[i])[index,:]); plt.ylabel(ylabel, fontsize=12) plt.yticks(yticks, fontsize=12) plt.ylim(ylim) ax.set_xticklabels([]) plt.xlim([0,100]) history = results['cnn-2'][activation] ax = plt.subplot(num_rows,3,8) for i in range(10): ax.plot(np.array(history[i])[index,:]); ax.set_xticklabels([]) plt.xlim([0,100]) plt.ylim(ylim) ax.set_yticklabels([]) history = results['cnn-50'][activation] ax = plt.subplot(num_rows,3,9); for i in range(10): ax.plot(np.array(history[i])[index,:]); ax.set_xticklabels([]) plt.xlim([0,100]) plt.ylim(ylim) ax.set_yticklabels([]) ax2 = ax.twinx() ax2.set_yticks([]) ax2.set_ylabel('Modified-Sigmoid', fontsize=12) activation = 'shift_scale_sigmoid' history = results['cnn-deep'][activation] ax = plt.subplot(num_rows,3,10); for i in range(10): ax.plot(np.array(history[i])[index,:]); plt.ylabel(ylabel, fontsize=12) plt.yticks(yticks, fontsize=12) plt.ylim(ylim) ax.set_xticklabels([]) plt.xlim([0,100]) history = results['cnn-2'][activation] ax = plt.subplot(num_rows,3,11) for i in range(10): ax.plot(np.array(history[i])[index,:]); ax.set_xticklabels([]) plt.xlim([0,100]) plt.ylim(ylim) ax.set_yticklabels([]) history = results['cnn-50'][activation] ax = plt.subplot(num_rows,3,12); for i in range(10): ax.plot(np.array(history[i])[index,:]); plt.xlim([0,100]) plt.ylim(ylim) ax.set_yticklabels([]) ax.set_xticklabels([]) ax2 = ax.twinx() ax2.set_yticks([]) ax2.set_ylabel('Modified-Relu', fontsize=12) activation = 'shift_scale_tanh' history = results['cnn-deep'][activation] ax = plt.subplot(num_rows,3,13); for i in range(10): ax.plot(np.array(history[i])[index,:]); plt.ylabel(ylabel, fontsize=12) plt.xlabel('Epoch', fontsize=12) plt.yticks(yticks, fontsize=12) plt.ylim(ylim) plt.xticks([0, 25, 50, 75, 100], fontsize=12) plt.xlim([0,100]) history = results['cnn-2'][activation] ax = plt.subplot(num_rows,3,14) for i in range(10): ax.plot(np.array(history[i])[index,:]); plt.xlabel('Epoch', fontsize=12) plt.xticks([0, 25, 50, 75, 100], fontsize=12) plt.xlim([0,100]) plt.ylim(ylim) ax.set_yticklabels([]) history = results['cnn-50'][activation] ax = plt.subplot(num_rows,3,15); for i in range(10): ax.plot(np.array(history[i])[index,:]); plt.xlabel('Epoch', fontsize=12) plt.xticks([0, 25, 50, 75, 100], fontsize=12) plt.xlim([0,100]) plt.ylim(ylim) ax.set_yticklabels([]) ax2 = ax.twinx() ax2.set_yticks([]) ax2.set_ylabel('Modified-Tanh', fontsize=12); outfile = os.path.join(results_path, 'aupr.pdf') fig.savefig(outfile, format='pdf', dpi=200, bbox_inches='tight') ```
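The three figures above are produced by three near-identical 5×3 grids of subplots that differ only in the metric index, y-label, y-limits, and output filename. As a possible refactor (a sketch only, assuming the same `results[model][activation][trial]` structure loaded above; the right-hand twin-axis row labels of the original figures are omitted for brevity), the repetition could be collapsed into one helper:

```
# Sketch of a single plotting helper for all three metric figures.
row_activations = ['relu', 'exponential', 'shift_scale_relu', 'shift_scale_sigmoid', 'shift_scale_tanh']

def plot_metric(results, index, ylabel, yticks, ylim, filename):
    fig = plt.figure(figsize=(9, 10))
    for r, activation in enumerate(row_activations):
        for c, model_name in enumerate(model_names):
            ax = plt.subplot(len(row_activations), 3, r * 3 + c + 1)
            for history in results[model_name][activation]:
                ax.plot(np.array(history)[index, :])
            ax.set_xlim([0, 100])
            ax.set_ylim(ylim)
            if r == 0:
                ax.set_title(model_name, fontsize=12)
            if c == 0:
                ax.set_ylabel(ylabel, fontsize=12)
                ax.set_yticks(yticks)
            else:
                ax.set_yticklabels([])
            if r == len(row_activations) - 1:
                ax.set_xlabel('Epoch', fontsize=12)
            else:
                ax.set_xticklabels([])
    fig.savefig(os.path.join(results_path, filename), format='pdf', dpi=200, bbox_inches='tight')

# Example usage matching the first figure above:
# plot_metric(results, index=0, ylabel='Training loss', yticks=[0.1, 0.2, 0.3, 0.4, 0.5], ylim=[0.1, .5], filename='training_loss.pdf')
```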
github_jupyter
# Batch processing with Argo Workflows In this notebook we will dive into how you can run batch processing with Argo Workflows and Seldon Core. Dependencies: * Seldon core installed as per the docs with an ingress * Minio running in your cluster to use as local (s3) object storage * Argo Workflows installed in cluster (and argo CLI for commands) ### Setup #### Install Seldon Core Use the notebook to [set-up Seldon Core with Ambassador or Istio Ingress](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html). Note: If running with KIND you need to make sure to follow [these steps](https://github.com/argoproj/argo/issues/2376#issuecomment-595593237) as a workaround to the `/.../docker.sock` known issue. #### Set up Minio in your cluster Use the notebook to [set-up Minio in your cluster](https://docs.seldon.io/projects/seldon-core/en/latest/examples/minio_setup.html). #### Copy the Minio Secret to namespace We need to re-use the minio secret for the batch job, so this can be done by just copying the minio secret created in the `minio-system` namespace. The command below just copies the secret with the name "minio" from the minio-system namespace to the default namespace. ``` !kubectl get secret minio -n minio-system -o json | jq '{apiVersion,data,kind,metadata,type} | .metadata |= {"annotations", "name"}' | kubectl apply -n default -f - ``` #### Install Argo Workflows You can follow the instructions from the official [Argo Workflows Documentation](https://github.com/argoproj/argo#quickstart). You also need to make sure that argo has permissions to create seldon deployments - for this you can create a role: ``` %%writefile role.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: workflow rules: - apiGroups: - "" resources: - pods verbs: - "*" - apiGroups: - "apps" resources: - deployments verbs: - "*" - apiGroups: - "" resources: - pods/log verbs: - "*" - apiGroups: - machinelearning.seldon.io resources: - "*" verbs: - "*" !!kubectl apply -f role.yaml ``` A service account: ``` !kubectl create serviceaccount workflow ``` And a binding: ``` !kubectl create rolebinding workflow --role=workflow --serviceaccount=default:workflow ``` ### Create some input for our model We will create a file that will contain the inputs that will be sent to our model. ``` mkdir -p assets/ with open("assets/input-data.txt", "w") as f: for i in range(10000): f.write('[[1, 2, 3, 4]]\n') ``` #### Check the contents of the file ``` !wc -l assets/input-data.txt !head assets/input-data.txt ``` #### Upload the file to our minio ``` !mc mb minio-seldon/data !mc cp assets/input-data.txt minio-seldon/data/ ``` #### Create Argo Workflow In order to create our argo workflow we have made it simple so you can leverage the power of the helm charts. Before we dive into the contents of the full helm chart, let's first give it a try with some of the settings. We will run a batch job that will set up a Seldon Deployment with 10 replicas and 100 batch client workers to send requests. 
``` !helm template seldon-batch-workflow helm-charts/seldon-batch-workflow/ \ --set workflow.name=seldon-batch-process \ --set seldonDeployment.name=sklearn \ --set seldonDeployment.replicas=10 \ --set seldonDeployment.serverWorkers=1 \ --set seldonDeployment.serverThreads=10 \ --set batchWorker.workers=100 \ --set batchWorker.payloadType=ndarray \ --set batchWorker.dataType=data \ | argo submit --serviceaccount workflow - !argo list !argo get seldon-batch-process !argo logs -w seldon-batch-process || argo logs seldon-batch-process # The 2nd command is for argo 2.8+ ``` ### Check output in object store We can now visualise the output that we obtained in the object store. First we can check that the file is present: ``` import json wf_arr = !argo get seldon-batch-process -o json wf = json.loads("".join(wf_arr)) WF_ID = wf["metadata"]["uid"] print(f"Workflow ID is {WF_ID}") !mc ls minio-seldon/data/output-data-"$WF_ID".txt ``` Now we can output the contents of the file created using the `mc head` command. ``` !mc cp minio-seldon/data/output-data-"$WF_ID".txt assets/output-data.txt !head assets/output-data.txt !argo delete seldon-batch-process ```
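As an optional local sanity check (not part of the original workflow; it assumes the `mc cp` cell above has already copied the results to `assets/output-data.txt`), you can confirm that the batch processor produced one response line per input line:

```
# Compare local input and output line counts; they should match the 10,000
# requests we generated at the start of the notebook.
with open("assets/input-data.txt") as f:
    n_inputs = sum(1 for _ in f)
with open("assets/output-data.txt") as f:
    n_outputs = sum(1 for _ in f)
print(f"inputs: {n_inputs}, outputs: {n_outputs}, match: {n_inputs == n_outputs}")
```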
github_jupyter
# Transformer Network Application: Named-Entity Recognition Welcome to Week 4's first ungraded lab. In this notebook you'll explore one application of the transformer architecture that you built in the previous assignment. **After this assignment you'll be able to**: * Use tokenizers and pre-trained models from the HuggingFace Library. * Fine-tune a pre-trained transformer model for Named-Entity Recognition. ## Table of Contents - [Packages](#0) - [1 - Named-Entity Recognition to Process Resumes](#1) - [1.1 - Dataset Cleaning](#1-1) - [1.2 - Padding and Generating Tags](#1-2) - [1.3 - Tokenize and Align Labels with 🤗 Library](#1-3) - [Exercise 1 - tokenize_and_align_labels](#ex-1) - [1.4 - Optimization](#1-4) <a name='0'></a> ## Packages Run the following cell to load the packages you'll need. ``` import pandas as pd import tensorflow as tf import json import random import logging import re ``` <a name='1'></a> ## 1 - Named-Entity Recognition to Process Resumes When faced with a large amount of unstructured text data, named-entity recognition (NER) can help you detect and classify important information in your dataset. For instance, in the running example "Jane visits Africa in September", NER would help you detect "Jane", "Africa", and "September" as named-entities and classify them as person, location, and time. * You will use a variation of the Transformer model you built in the last assignment to process a large dataset of resumes. * You will find and classify relevant information such as the companies the applicant worked at, skills, type of degree, etc. <a name='1-1'></a> ### 1.1 - Dataset Cleaning In this assignment you will optimize a Transformer model on a dataset of resumes. Take a look at how the data you will be working with are structured. ``` df_data = pd.read_json("ner.json", lines=True) df_data = df_data.drop(['extras'], axis=1) df_data['content'] = df_data['content'].str.replace("\n", " ") df_data.head() df_data.iloc[0]['annotation'] def mergeIntervals(intervals): sorted_by_lower_bound = sorted(intervals, key=lambda tup: tup[0]) merged = [] for higher in sorted_by_lower_bound: if not merged: merged.append(higher) else: lower = merged[-1] if higher[0] <= lower[1]: if lower[2] is higher[2]: upper_bound = max(lower[1], higher[1]) merged[-1] = (lower[0], upper_bound, lower[2]) else: if lower[1] > higher[1]: merged[-1] = lower else: merged[-1] = (lower[0], higher[1], higher[2]) else: merged.append(higher) return merged def get_entities(df): entities = [] for i in range(len(df)): entity = [] for annot in df['annotation'][i]: try: ent = annot['label'][0] start = annot['points'][0]['start'] end = annot['points'][0]['end'] + 1 entity.append((start, end, ent)) except: pass entity = mergeIntervals(entity) entities.append(entity) return entities df_data['entities'] = get_entities(df_data) df_data.head() def convert_dataturks_to_spacy(dataturks_JSON_FilePath): try: training_data = [] lines=[] with open(dataturks_JSON_FilePath, 'r') as f: lines = f.readlines() for line in lines: data = json.loads(line) text = data['content'].replace("\n", " ") entities = [] data_annotations = data['annotation'] if data_annotations is not None: for annotation in data_annotations: #only a single point in text annotation. point = annotation['points'][0] labels = annotation['label'] # handle both list of labels or a single label. 
if not isinstance(labels, list): labels = [labels] for label in labels: point_start = point['start'] point_end = point['end'] point_text = point['text'] lstrip_diff = len(point_text) - len(point_text.lstrip()) rstrip_diff = len(point_text) - len(point_text.rstrip()) if lstrip_diff != 0: point_start = point_start + lstrip_diff if rstrip_diff != 0: point_end = point_end - rstrip_diff entities.append((point_start, point_end + 1 , label)) training_data.append((text, {"entities" : entities})) return training_data except Exception as e: logging.exception("Unable to process " + dataturks_JSON_FilePath + "\n" + "error = " + str(e)) return None def trim_entity_spans(data: list) -> list: """Removes leading and trailing white spaces from entity spans. Args: data (list): The data to be cleaned in spaCy JSON format. Returns: list: The cleaned data. """ invalid_span_tokens = re.compile(r'\s') cleaned_data = [] for text, annotations in data: entities = annotations['entities'] valid_entities = [] for start, end, label in entities: valid_start = start valid_end = end while valid_start < len(text) and invalid_span_tokens.match( text[valid_start]): valid_start += 1 while valid_end > 1 and invalid_span_tokens.match( text[valid_end - 1]): valid_end -= 1 valid_entities.append([valid_start, valid_end, label]) cleaned_data.append([text, {'entities': valid_entities}]) return cleaned_data data = trim_entity_spans(convert_dataturks_to_spacy("ner.json")) from tqdm.notebook import tqdm def clean_dataset(data): cleanedDF = pd.DataFrame(columns=["setences_cleaned"]) sum1 = 0 for i in tqdm(range(len(data))): start = 0 emptyList = ["Empty"] * len(data[i][0].split()) numberOfWords = 0 lenOfString = len(data[i][0]) strData = data[i][0] strDictData = data[i][1] lastIndexOfSpace = strData.rfind(' ') for i in range(lenOfString): if (strData[i]==" " and strData[i+1]!=" "): for k,v in strDictData.items(): for j in range(len(v)): entList = v[len(v)-j-1] if (start>=int(entList[0]) and i<=int(entList[1])): emptyList[numberOfWords] = entList[2] break else: continue start = i + 1 numberOfWords += 1 if (i == lastIndexOfSpace): for j in range(len(v)): entList = v[len(v)-j-1] if (lastIndexOfSpace>=int(entList[0]) and lenOfString<=int(entList[1])): emptyList[numberOfWords] = entList[2] numberOfWords += 1 cleanedDF = cleanedDF.append(pd.Series([emptyList], index=cleanedDF.columns ), ignore_index=True ) sum1 = sum1 + numberOfWords return cleanedDF cleanedDF = clean_dataset(data) ``` Take a look at your cleaned dataset and the categories the named-entities are matched to, or 'tags'. ``` cleanedDF.head() ``` <a name='1-2'></a> ### 1.2 - Padding and Generating Tags Now, it is time to generate a list of unique tags you will match the named-entities to. ``` unique_tags = set(cleanedDF['setences_cleaned'].explode().unique())#pd.unique(cleanedDF['setences_cleaned'])#set(tag for doc in cleanedDF['setences_cleaned'].values.tolist() for tag in doc) tag2id = {tag: id for id, tag in enumerate(unique_tags)} id2tag = {id: tag for tag, id in tag2id.items()} unique_tags ``` Next, you will create an array of tags from your cleaned dataset. Oftentimes your input sequence will exceed the maximum length of a sequence your network can process. In this case, your sequence will be cut off, and you need to append zeroes onto the end of the shortened sequences using this [Keras padding API](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/sequence/pad_sequences). 
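For intuition, here is a tiny toy illustration of what post-padding and post-truncation do (the label ids below are made up purely for this example) before applying them to the real label lists in the next cell:

```
# Toy illustration with made-up label ids: sequences shorter than maxlen are
# padded with the fill value at the end, longer ones are cut off at the end.
from tensorflow.keras.preprocessing.sequence import pad_sequences

demo_labels = [[3, 1, 4], [1, 5, 9, 2, 6, 5]]
print(pad_sequences(demo_labels, maxlen=5, value=0, padding="post", truncating="post"))
# [[3 1 4 0 0]
#  [1 5 9 2 6]]
```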
``` from tensorflow.keras.preprocessing.sequence import pad_sequences MAX_LEN = 512 labels = cleanedDF['setences_cleaned'].values.tolist() tags = pad_sequences([[tag2id.get(l) for l in lab] for lab in labels], maxlen=MAX_LEN, value=tag2id["Empty"], padding="post", dtype="long", truncating="post") tags ``` <a name='1-3'></a> ### 1.3 - Tokenize and Align Labels with 🤗 Library Before feeding the texts to a Transformer model, you will need to tokenize your input using a [🤗 Transformer tokenizer](https://huggingface.co/transformers/main_classes/tokenizer.html). It is crucial that the tokenizer you use must match the Transformer model type you are using! In this exercise, you will use the 🤗 [DistilBERT fast tokenizer](https://huggingface.co/transformers/model_doc/distilbert.html), which standardizes the length of your sequence to 512 and pads with zeros. Notice this matches the maximu length you used when creating tags. ``` from transformers import DistilBertTokenizerFast #, TFDistilBertModel tokenizer = DistilBertTokenizerFast.from_pretrained('tokenizer/') ``` Transformer models are often trained by tokenizers that split words into subwords. For instance, the word 'Africa' might get split into multiple subtokens. This can create some misalignment between the list of tags for the dataset and the list of labels generated by the tokenizer, since the tokenizer can split one word into several, or add special tokens. Before processing, it is important that you align the lists of tags and the list of labels generated by the selected tokenizer with a `tokenize_and_align_labels()` function. <a name='ex-1'></a> ### Exercise 1 - tokenize_and_align_labels Implement `tokenize_and_align_labels()`. The function should perform the following: * The tokenizer cuts sequences that exceed the maximum size allowed by your model with the parameter `truncation=True` * Aligns the list of tags and labels with the tokenizer `word_ids` method returns a list that maps the subtokens to the original word in the sentence and special tokens to `None`. * Set the labels of all the special tokens (`None`) to -100 to prevent them from affecting the loss function. * Label of the first subtoken of a word and set the label for the following subtokens to -100. ``` label_all_tokens = True def tokenize_and_align_labels(tokenizer, examples, tags): tokenized_inputs = tokenizer(examples, truncation=True, is_split_into_words=False, padding='max_length', max_length=512) labels = [] for i, label in enumerate(tags): word_ids = tokenized_inputs.word_ids(batch_index=i) previous_word_idx = None label_ids = [] for word_idx in word_ids: # Special tokens have a word id that is None. We set the label to -100 so they are automatically # ignored in the loss function. if word_idx is None: label_ids.append(-100) # We set the label for the first token of each word. elif word_idx != previous_word_idx: label_ids.append(label[word_idx]) # For the other tokens in a word, we set the label to either the current label or -100, depending on # the label_all_tokens flag. else: label_ids.append(label[word_idx] if label_all_tokens else -100) previous_word_idx = word_idx labels.append(label_ids) tokenized_inputs["labels"] = labels return tokenized_inputs ``` Now that you have tokenized inputs, you can create train and test datasets! 
``` test = tokenize_and_align_labels(tokenizer, df_data['content'].values.tolist(), tags) train_dataset = tf.data.Dataset.from_tensor_slices(( test['input_ids'], test['labels'] )) test['labels'][0] ``` <a name='1-4'></a> ### 1.4 - Optimization Fantastic! Now you can finally feed your data into a pretrained 🤗 model. You will optimize a DistilBERT model, which matches the tokenizer you used to preprocess your data. Try playing around with the different hyperparameters to improve your results! ``` from transformers import TFDistilBertForTokenClassification model = TFDistilBertForTokenClassification.from_pretrained('model/', num_labels=len(unique_tags)) optimizer = tf.keras.optimizers.Adam(learning_rate=1e-5) model.compile(optimizer=optimizer, loss=model.compute_loss, metrics=['accuracy']) # can also use any keras loss fn model.fit(train_dataset.shuffle(1000).batch(16), epochs=3, batch_size=16) ``` ### Congratulations! #### Here's what you should remember - Named-entity recognition (NER) detects and classifies named-entities, and can help process resumes, customer reviews, browsing histories, etc. - You must preprocess text data with the corresponding tokenizer to the pretrained model before feeding your input into your Transformer model.
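As an optional follow-up (a minimal sketch, not part of the graded notebook: it assumes the cells above have been run, and the resume snippet below is invented for illustration), the fine-tuned model can tag a new piece of text, with the predicted ids mapped back to tag names via `id2tag`:

```
# Hypothetical example text, purely for illustration.
sample = "Experienced software engineer, B.E. in Computer Science, worked at Infosys in Bangalore."

inputs = tokenizer(sample, return_tensors="tf", truncation=True)
outputs = model(inputs["input_ids"])
# Depending on the transformers version, outputs may be a tuple or a ModelOutput.
logits = outputs.logits if hasattr(outputs, "logits") else outputs[0]
pred_ids = tf.argmax(logits, axis=-1).numpy()[0]

ids = inputs["input_ids"].numpy()[0]
tokens = tokenizer.convert_ids_to_tokens([int(i) for i in ids])
for token, pred in zip(tokens, pred_ids):
    if token not in ("[CLS]", "[SEP]", "[PAD]"):
        print(f"{token:15s} -> {id2tag[int(pred)]}")
```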
github_jupyter
The tutorials use PyTorch. You will need to load the following dependencies. ``` # This specific version of torchvision is needed to download the mnist set !pip install torchvision==0.9.1 torch==1.8.0 import random import PIL import imageio import matplotlib.pyplot as plt import numpy as np import skimage.transform import torch import torch.nn as nn import torch.utils.data import torchvision from torchvision import datasets, transforms from IPython import display ``` The code below may be helpful in visualizing PyTorch tensors as images. ``` %matplotlib inline def show(img): """Show PyTorch tensor img as an image in matplotlib.""" npimg = img.cpu().detach().numpy() plt.imshow(np.transpose(npimg, (1, 2, 0)), interpolation='nearest') plt.grid(False) plt.gca().axis('off') def display_thumb(img): display.display(transforms.Resize(128)(img)) device = 'cuda' if torch.cuda.is_available() else 'cpu' ``` ## First tutorial: In the first tutorial, we are going to train a logistic regressor on the MNIST dataset of handwritten digits. Next, we will turn this logistic regressor into a non-linear convolutional network. The following code will load the MNIST dataset. Run it and inspect some of the images and their labels to confirm they are correct. ``` # Load the training and test dataset. mnist_train = datasets.MNIST('/tmp/mnist', train=True, download=True) mnist_test = datasets.MNIST('/tmp/mnist', train=False, download=True) # Show a random image and the corresponding target. img, target = mnist_train[0] print('Label of image:', mnist_train.classes[target]) img ``` Next, we create a PyTorch dataloader for the MNIST dataset. ``` # This ensures the MNIST dataset produces PyTorch tensors. mnist_train.transform = transforms.ToTensor() mnist_test.transform = transforms.ToTensor() # Size of the batches the data loader will produce. batch_size = 64 # This creates the dataloaders. train_loader = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True) test_loader = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False) ``` Next, implement a logistic regression model in PyTorch. Note that a logistic regressor uses a linear transformation of the input. ``` class LogisticRegression(nn.Module): """Linear logistic regression model.""" def __init__(self, input_size, num_classes): super().__init__() ########################################################################### # TODO: Instantiate the layer here. # ########################################################################### def forward(self, x): ########################################################################### # TODO: Apply the layer to the input. # ########################################################################### ``` We will use the following generic training loop for a PyTorch model. ``` def train(model, criterion, data_loader, optimizer, num_epochs): """Simple training loop for a PyTorch model.""" # Make sure model is in training mode. model.train() # Move model to the device. model.to(device) # Exponential moving average of the loss. ema_loss = None # Loop over epochs. for epoch in range(num_epochs): # Loop over data. for batch_idx, (data, target) in enumerate(data_loader): data = data.to(device) target = target.to(device) # Forward pass. output = model(data) loss = criterion(output, target) # Backward pass. optimizer.zero_grad() loss.backward() optimizer.step() # NOTE: It is important to call .item() on the loss before summing. 
if ema_loss is None: ema_loss = loss.item() else: ema_loss += (loss.item() - ema_loss) * 0.01 # Print out progress. if batch_idx % 500 == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( epoch, batch_idx * len(data), len(data_loader.dataset), 100. * batch_idx / len(data_loader), ema_loss), ) ``` **Question:** For the model you are currently using, is there any difference between using the model in `train` mode or using it in `eval` mode? Create an SGD optimizer and us it to train the logistic regressor on the MNIST training data for a few epochs. What loss function do you need to use? ``` # Create model, criterion, and optimizer. model = LogisticRegression(28 * 28, 10) ########################################################################### # TODO: Create criterion and optimize here. # ########################################################################### criterion = None optimizer = None # Train the model. If everything is correct, the loss should go below 0.45. train(model, criterion, train_loader, optimizer, num_epochs=5) ``` Visualize the weights of the trained model. What do you see? Why? ``` assert model.linear.weight.shape == (10, 28 * 28) show(torchvision.utils.make_grid( model.linear.weight.view(10, 1, 28, 28), normalize=True, nrow=5, )) ``` Use the following function to measure the test accuracy of your trained model. ``` def test(model, data_loader): """Measures the accuracy of a model on a data set.""" # Make sure the model is in evaluation mode. model.eval() correct = 0 # We do not need to maintain intermediate activations while testing. with torch.no_grad(): # Loop over test data. for data, target in data_loader: # Forward pass. output = model(data.to(device)) # Get the label corresponding to the highest predicted probability. pred = output.argmax(dim=1, keepdim=True) # Count number of correct predictions. correct += pred.cpu().eq(target.view_as(pred)).sum().item() # Print test accuracy. print('Accuracy: {}/{} ({:.0f}%)\n'.format( correct, len(data_loader.dataset), 100. * correct / len(data_loader.dataset)), ) # Accuracy should be around 90%. test(model, test_loader) ``` **Question:** To have the logistic regressor output probabilities, they need to be processed through a softmax layer. Implement a softmax layer yourself. What numerical issues may arise in this layer? How can you solve them? Use the testing code to confirm you implemented it correctly. ``` def bad_softmax(logits): """Computes softmax in a naive manner.""" probs = logits.exp() probs /= probs.sum(-1, keepdim=True) return probs def good_softmax(logits): """Computes softmax in a numerically safe manner.""" ########################################################################### # TODO: Implement a more stable way to compute softmax # ########################################################################### return probs # Test the new softmax layer. logits = torch.rand((1, 20)) + 100 print(bad_softmax(logits).sum(), good_softmax(logits).sum()) # by definition, the correct value is 1 ``` Because of numerical issues like the one you just experiences, PyTorch code typically uses a `LogSoftmax` layer. **Question [optional]:** PyTorch automatically computes the backpropagation gradient of a module for you. However, it can be instructive to derive and implement your own backward function. Try and implement the backward function for your softmax module and confirm that it is correct. 
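One way to approach the optional backward question above (shown here as a hedged sketch of one possible derivation, not the course's reference solution) is to express the gradient with respect to the logits as a Jacobian-vector product and verify it against autograd:

```
# Backward pass of softmax: dL/dz_i = p_i * (dL/dp_i - sum_j dL/dp_j * p_j)
def softmax_backward(grad_output, probs):
    """grad_output, probs: tensors of shape (batch, classes)."""
    dot = (grad_output * probs).sum(dim=-1, keepdim=True)
    return probs * (grad_output - dot)

# Check the manual gradient against PyTorch autograd.
logits = torch.randn(4, 10, requires_grad=True)
probs = torch.softmax(logits, dim=-1)
upstream = torch.randn_like(probs)
probs.backward(upstream)
manual = softmax_backward(upstream, torch.softmax(logits.detach(), dim=-1))
print(torch.allclose(logits.grad, manual, atol=1e-6))
```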
## Convolutions and images The spatial dimensions of the ouput image (width and height) depend on the spatial dimensions of the input image, kernel_size, padding, and striding. In order to build efficient convolutional networks, it's important to understand what the sizes are after after each convolutional layer. In this exersise you will derive the dependency between input and output image sizes. For the sake of simplicity we assume that the input tensor is _square_, i.e., width = height = image_size. We will use the nn.Conv2d layer here. We have not yet discussed what a convolutional layer is yet, but if you set the first two parameters (input channels and output channels) to 1, then this defines a basic convolution. If your code is correct, you should see 'OK'. ``` def compute_conv_output_size(image_size, kernel_size, padding, stride): ########################################################################### # Add code that computes the size of the image after a conv layer. # ########################################################################### return output_size # Compare the size of the output of nn.Conv2d with compute_convnet_output_size. for image_size in range(5, 21, 1): # Shape: batch x channels x height x width. input_tensor = torch.zeros((1, 1, image_size, image_size)) for kernel_size in 2, 3, 5, 7: for padding in 0, 5: for stride in 1, 2, 3, 4: if kernel_size >= image_size: continue output_tensor = nn.Conv2d(1, 1, kernel_size, stride, padding)(input_tensor) output_size = output_tensor.size(2) predicted_output_size = compute_conv_output_size( image_size, kernel_size, padding, stride) assert output_size == predicted_output_size, ( f'ERROR: the real size is {output_size},' f' but got {predicted_output_size}.' f'\nimage_size={image_size}' f' kernel_size={kernel_size}' f' padding={padding}' f' stride={stride}' ) print('OK') ``` You can now use the function you just implemented to compute the size of the output of a convolution. ``` compute_conv_output_size(1, 1, 1, 1) ``` **Question [optional]:** Implement your own convolution operator **without** using any of PyTorch's (or numpy's) pre-defined convolutional functions. ``` def conv_naive(x, w, b, conv_param): """ A naive Python implementation of a convolution. The input consists of an image tensor with height H and width W. We convolve each input with a filter F, where the filter has height HH and width WW. Input: - x: Input data of shape (H, W) - w: Filter weights of shape (HH, WW) - b: Bias for filter - conv_param: A dictionary with the following keys: - 'stride': The number of pixels between adjacent receptive fields in the horizontal and vertical directions. - 'pad': The number of pixels that will be used to zero-pad the input. During padding, 'pad' zeros should be placed symmetrically (i.e equally on both sides) along the height and width axes of the input. Be careful not to modfiy the original input x directly. Returns an array. - out: Output data, of shape (H', W') where H' and W' are given by H' = 1 + (H + 2 * pad - HH) / stride W' = 1 + (W + 2 * pad - WW) / stride """ out = None H, W = x.shape filter_height, filter_width = w.shape stride, pad = conv_param['stride'], conv_param['pad'] # Check dimensions. assert (W + 2 * pad - filter_width) % stride == 0, 'width does not work' assert (H + 2 * pad - filter_height) % stride == 0, 'height does not work' ########################################################################### # TODO: Implement the convolutional forward pass. 
# # Hint: you can use the function torch.nn.functional.pad for padding. # ########################################################################### ``` You can test your implementation by running the following: ``` # Make convolution module. w_shape = (4, 4) w = torch.linspace(-0.2, 0.3, steps=torch.prod(torch.tensor(w_shape))).reshape(w_shape) b = torch.linspace(-0.1, 0.2, steps=1) # Compute output of module and compare against reference values. x_shape = (4, 4) x = torch.linspace(-0.1, 0.5, steps=torch.prod(torch.tensor(x_shape))).reshape(x_shape) out = conv_naive(x, w, b, {'stride': 2, 'pad': 1}) correct_out = torch.tensor([[0.156, 0.162], [0.036, -0.054]]) # Compare your output to ours; difference should be around e-8 print('Testing conv_forward_naive') rel_error = ((out - correct_out) / (out + correct_out + 1e-6)).mean() print('difference: ', rel_error) if abs(rel_error) < 1e-6: print('Nice work! Your implementation of a convolution layer works correctly.') else: print('Something is wrong. The output was expected to be {} but it was {}'.format(correct_out, out)) ``` **Aside: Image processing via convolutions:** As fun way to gain a better understanding of the type of operation that convolutional layers can perform, we will set up an input containing two images and manually set up filters that perform common image processing operations (grayscale conversion and edge detection). The convolution forward pass will apply these operations to each of the input images. We can then visualize the results as a sanity check. ``` # Load image of a kitten and a puppy. kitten_uri = "https://upload.wikimedia.org/wikipedia/commons/thumb/1/1b/Persian_Cat_%28kitten%29.jpg/256px-Persian_Cat_%28kitten%29.jpg" puppy_uri = "https://upload.wikimedia.org/wikipedia/commons/thumb/6/6e/Golde33443.jpg/256px-Golde33443.jpg" kitten, puppy = imageio.imread(kitten_uri), imageio.imread(puppy_uri) img_size = 200 # Make this smaller if it runs too slow x = np.zeros((2, 3, img_size, img_size)) x[0, :, :, :] = skimage.transform.resize(puppy, (img_size, img_size)).transpose((2, 0, 1)) x[1, :, :, :] = skimage.transform.resize(kitten, (img_size, img_size)).transpose((2, 0, 1)) x = torch.FloatTensor(x) # Set up a convolutional weights holding 2 filters, each 3x3 w = torch.zeros((2, 3, 3, 3), dtype=torch.float) # The first filter converts the image to grayscale. # Set up the red, green, and blue channels of the filter. w[0, 0, :, :] = torch.tensor([[0, 0, 0], [0, 0.3, 0], [0, 0, 0]]) w[0, 1, :, :] = torch.tensor([[0, 0, 0], [0, 0.6, 0], [0, 0, 0]]) w[0, 2, :, :] = torch.tensor([[0, 0, 0], [0, 0.1, 0], [0, 0, 0]]) # Second filter detects horizontal edges in the blue channel. w[1, 2, :, :] = torch.tensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]) # Vector of biases. We don't need any bias for the grayscale # filter, but for the edge detection filter we want to add 128 # to each output so that nothing is negative. b = torch.tensor([0, 128], dtype=torch.float) # Compute the result of convolving each input in x with each filter in w, # offsetting by b, and storing the results in out. 
out = nn.functional.conv2d(x, w, b, stride=1, padding=1).numpy() def imshow_noax(img, normalize=True): """Tiny helper to show images as uint8 and remove axis labels.""" if normalize: img_max, img_min = np.max(img), np.min(img) img = 255.0 * (img - img_min) / (img_max - img_min) plt.imshow(img.astype('uint8')) plt.gca().axis('off') # Show the original images and the results of the conv operation plt.subplot(2, 3, 1) imshow_noax(puppy, normalize=False) plt.title('Original image') plt.subplot(2, 3, 2) imshow_noax(out[0, 0]) plt.title('Grayscale') plt.subplot(2, 3, 3) imshow_noax(out[0, 1]) plt.title('Edges') plt.subplot(2, 3, 4) imshow_noax(kitten, normalize=False) plt.subplot(2, 3, 5) imshow_noax(out[1, 0]) plt.subplot(2, 3, 6) imshow_noax(out[1, 1]) plt.show() ```
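As a small follow-up sketch (not part of the original tutorial), the same fixed filters can be packed into an `nn.Conv2d` module by overwriting its parameters, which ties this image-processing example back to the `nn.Conv2d` layers used earlier in the notebook:

```
# Wrap the hand-crafted filters in an nn.Conv2d module and check that it
# reproduces the functional conv2d output computed above.
conv = nn.Conv2d(in_channels=3, out_channels=2, kernel_size=3, stride=1, padding=1)
with torch.no_grad():
    conv.weight.copy_(w)
    conv.bias.copy_(b)
out_module = conv(x)
print(torch.allclose(out_module, torch.as_tensor(out), atol=1e-5))
```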
github_jupyter
<a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W2D1_BayesianStatistics/W2D1_Tutorial2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Neuromatch Academy: Week 2, Day 1, Tutorial 1 # Causal inference with mixture of Gaussians **Tutorial Lecturer:** *Konrad Kording* **Tutorial Content Creator:** *Vincent Valton* ##Tutorial Objective ``` #@title Video: Intro from IPython.display import YouTubeVideo video = YouTubeVideo(id='ium-eaJz9yo', width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video ``` --- ### Tutorial objectives In this notebook we'll look at creating mixtures of Gaussian distributions by applying a mixing weight to the distributions. Mathematically, we can control how the Gaussians are mixed by summing them and using a mixing parameter $\alpha$ (comprised between zero and one): \begin{eqnarray} \text{Mixture} = \left[ \alpha \times \mathcal{N_1}(\mu_{1},\sigma_{1}) \right] + \left[ \left( 1 - \alpha \right) \times \mathcal{N_2}(\mu_{2},\sigma_{2}) \right] \end{eqnarray} where $\mathcal{N_{1}}$ and $\mathcal{N_{2}}$ are the first and second Gaussian distributions used for the mixture. Steps: 1. Implement a mixture of Gaussian prior 2. Given Bayes rule, a mixture of Gaussian prior and a Gaussian likelihood, calculate the posterior distribution 3. Create a Mixture of Gaussian prior matrix that repeats the prior over multiple rows of the matrix 4. Create a Likelihood matrix with a different likelihood mean for each row of the likelihood matrix. 5. Create a Posterior matrix that is the result (on each row of the posterior matrix), the combination of the prior and likelihood matrices (row-wise). 6. Create a binary decision matrix that reports the most likely action for each of the row-posteriors of the posterior matrix. So lets start implementing these steps, one by one. --- ##Setup Please execute the cells below to initialize the notebook environment. ``` # imports import time # import time import numpy as np # import numpy import scipy as sp # import scipy import math # import basic math functions import random # import basic random number generator functions import matplotlib.pyplot as plt # import matplotlib from IPython import display #@title Figure Settings fig_w, fig_h = (8, 6) plt.rcParams.update({'figure.figsize': (fig_w, fig_h)}) plt.style.use('ggplot') %matplotlib inline %config InlineBackend.figure_format = 'retina' #@title Helper functions def my_gaussian(x_points, mu, sigma): """ DO NOT EDIT THIS FUNCTION !!! Returns un-normalized Gaussian estimated at points `x_points`, with parameters `mu` and `sigma` Args: x_points (numpy array of floats) - points at which the gaussian is evaluated mu (scalar) - mean of the Gaussian sigma (scalar) - standard deviation of the gaussian Returns: (numpy array of floats): un-normalized Gaussian (i.e. without constant) evaluated at `x` """ return np.exp(-(x_points-mu)**2/(2*sigma**2)) def plot_my_composed_prior(x, gaussian1, gaussian2, combined): """ DO NOT EDIT THIS FUNCTION !!! 
Plots a prior made of a mixture of gaussians Args: x (numpy array of floats): points at which the likelihood has been evaluated gaussian1 (numpy array of floats): normalized probabilities for auditory likelihood evaluated at each `x` gaussian2 (numpy array of floats): normalized probabilities for visual likelihood evaluated at each `x` posterior (numpy array of floats): normalized probabilities for the posterior evaluated at each `x` Returns: Nothing """ plt.plot(x, gaussian1, '--b', LineWidth=2, label='Gaussian 1') plt.plot(x, gaussian2, '-.b', LineWidth=2, label='Gaussian 2') plt.plot(x, combined, '-r', LineWidth=2, label='Gaussian Mixture') plt.legend() plt.ylabel('Probability') plt.xlabel('Orientation (Degrees)') def my_dynamic_plot(x, prior, likelihood, posterior_pointwise): """ DO NOT EDIT THIS FUNCTION !!! Plots the prior, likelihood and posterior distributions and update the figure Args: x (numpy array of floats): points at which the likelihood has been evaluated auditory (numpy array of floats): normalized probabilities for auditory likelihood evaluated at each `x` visual (numpy array of floats): normalized probabilities for visual likelihood evaluated at each `x` posterior (numpy array of floats): normalized probabilities for the posterior evaluated at each `x` Returns: Nothing """ plt.clf() plt.plot(x, prior, '-r', LineWidth=2, label='Prior') plt.plot(x, likelihood, '-b', LineWidth=2, label='Likelihood') plt.plot(x, posterior_pointwise, '-g', LineWidth=2, label='Posterior') plt.ylabel('Probability') plt.xlabel('Orientation (Degrees)') plt.legend() display.clear_output(wait=True) display.display(plt.gcf()) time.sleep(0.2) def plot_mymatrix(x, matrix, xlabel, ylabel, title): """ DO NOT EDIT THIS FUNCTION !!! Plots a matrix Args : x (numpy array of floats): values where matrix is evaluated matrix (numpy array of floats) xlabel (string) : label of x-axis ylabel (string) : label of y-axis title (string) : title of plot Returns: None """ plt.figure(figsize=(fig_w*1.2, fig_h)) plt.pcolor(matrix, edgecolors='w', linewidths=1) plt.colorbar() plt.xticks(np.arange(x.shape[0]), x) plt.title(title) plt.ylabel(ylabel) plt.xlabel(xlabel) plt.show() ``` --- ## a. Implement a mixture of Gaussians We now want to create a mixture of Gaussian probability density functions (PDFs), that we'll use as a prior in subsequent exercises. We provide you with ready-to-use plotting functions, and a code skeleton to plot the resulting PDF. **Suggestions** * Using the equation for the un-normalised Gaussian `my_gaussian`: * Generate a Gaussian with mean 0 and standard deviation 0.5 * Generate another Gaussian with mean 0 and standard deviation 10 * Combine the two Gaussians to make a new prior by mixing the two Gaussians with mixing parameter $\alpha$ = 0.05. Make it such that the peakier Gaussian has 95% of the weight (don't forget to normalize afterwards) * Using the function `plot_my_composed_prior` provided, plot the resulting mixture of gaussian * Play with the means and variance of the two Gaussians and observe the resulting distribution to get an intuition of how the parameters affect the mixture. **Helper function(s)** ``` help(plot_my_composed_prior) ``` ###Exercise 1 ``` x = np.arange(-10, 11, 0.1) prior_mean = 0. prior_sigma1 = .5 prior_sigma2 = 3. 
alpha = 0.05 ############################################################################### ## Insert your code here to: ## Create a Gaussian prior made of two Gaussians ## Both with mean 0 and standard deviation 0.5 and 3 respectively ## Make the combined prior (made of the two Gaussians) by weighing it ## using a mixing parameter alpha = 0.05 such that the peakier Gaussian has ## weight 0.95 ## Plot the two Gaussian and the resulting mixture using the function `plot_my_composed_prior` ############################################################################### ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W2D1_BayesianStatistics/solutions/W2D1_Tutorial2_Solution_03d850de.py) *Example output:* <img alt='Solution hint' align='left' width=613 height=477 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W2D1_BayesianStatistics/static/W2D1_Tutorial2_Solution_03d850de_0.png> <img src="https://github.com/NeuromatchAcademy/course-content/raw/master/tutorials/static/sample_output.png"/> <img width="450px" src="https://github.com/NeuromatchAcademy/course-content/raw/master/tutorials/Bayes/Expected_outputs/Student_BayesDay_Tutorial_2_fig_1.jpg"/> --- ## b. Bayes with mixture of Gaussians ``` #@title Video: Bayes with mixture of Gaussians video = YouTubeVideo(id='SYTaSvW_rpE', width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video ``` We now want compute the posterior using *Bayes rule*, having the mixture of Gaussian as a prior, and a Gaussian likelihood. Using the provided plotting function `my_dynamic_plot`, we'll see how the 'fat-tails' of the Gaussian mixture affects the linearity of the posterior mode as a function of the stimulus position. **Suggestions** Using the Gaussian mixture from exercise 1 as a prior: * Allow the mean of the Gaussian likelihood to vary from -8 to 8 in steps of 0.2 degree, keeping $\sigma$ of the visual stimuli to 1. * In a loop, calculate the posterior for each visual stimulus, and call the `my_dynamic_plot` function to plot it. * Calculate the mode of the posterior and plot it against the visual stimulus mean. What do you observe? **Helper function(s)** ``` help(my_dynamic_plot) ``` ###Exercise 2 ``` x = np.arange(-10, 11, 0.1) visual_mean = np.arange(-8, 9, 0.2) visual_sigma = 1. 
############################################################################### ## Insert your code here to: ## Use the Gaussian mixture of Exercise 1 as your prior ## Create a Gaussian Likelihood with sigma = 1, and mean varying from -8 to 9 in increments of 0.2 Degrees ## Calculate the posterior by multiplying (pointwise) the 'auditory' and 'visual' gaussians ## (Hint: Do not forget to normalise the gaussians before plotting them) ## plot the distributions using the function `my_dynamic_plot` ## plot the posterior mode as a function of visual's mean ############################################################################### ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W2D1_BayesianStatistics/solutions/W2D1_Tutorial2_Solution_9b5af403.py) *Example output:* <img alt='Solution hint' align='left' width=559 height=849 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W2D1_BayesianStatistics/static/W2D1_Tutorial2_Solution_9b5af403_0.png> <img src="https://github.com/NeuromatchAcademy/course-content/raw/master/tutorials/static/sample_output.png"/> <img width="450px" src="https://github.com/NeuromatchAcademy/course-content/raw/master/tutorials/Bayes/Expected_outputs/Student_BayesDay_Tutorial_2_fig_2.jpg"/> --- ## c. Creating a prior matrix We now want to create a prior matrix using the mixture of gaussians prior created in exercise 1. We do this because it will help us visualize graphically what is being represented and computed at each step of the inference process (this will be particularly useful in later exercises). **Suggestions** Using the prior you defined in Exercise 1 and the range `x=[-10,10]` present in your code : * The first row of your prior matrix will be your prior defined in Ex1. * Now repeat that row prior 20 times to make a matrix of 20 row-priors. 
* Plot the matrix using the function `plot_mymatrix()` already pre-written in your script - `plot_mymatrix()` has row 0 at the bottom, and row 20 at the top **Helper function** ``` help(plot_mymatrix) ``` ###Exercise 3 ``` x = np.arange(-10, 11, 1) ############################################################################## ## Insert your code here to: ## Create a Gaussian prior made of two Gaussian ## Both with mu = 0 and sigma 0.5 and 3 respectively ## Make the combined prior (made of the two Gaussians) by weighing it ## using a mixing parameter alpha = 0.05 such that the peakier Gaussian has ## weight 30% ## This mixture will make up the first row of your matrix ## Now repeat this row-prior 20 times, to make up a Prior matrix of 20 identical row-priors (use the `np.tile()` function) ## Plot the Prior Matrix using the function `plt.pcolor` and the code snippet provided below ############################################################################### # Uncomment once the task (steps above) is complete # plot_mymatrix(x, prior_matrix, 'Orientation (Degree)', 'Repetitions', 'Prior Matrix') ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W2D1_BayesianStatistics/solutions/W2D1_Tutorial2_Solution_d9bf9f48.py) *Example output:* <img alt='Solution hint' align='left' width=581 height=412 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W2D1_BayesianStatistics/static/W2D1_Tutorial2_Solution_d9bf9f48_0.png> <img src="https://github.com/NeuromatchAcademy/course-content/raw/master/tutorials/static/sample_output.png"/> <img width="450px" src="https://github.com/NeuromatchAcademy/course-content/raw/master/tutorials/Bayes/Expected_outputs/Student_BayesDay_Tutorial_2_fig_3.jpg"/> --- ## d. Creating a likelihood matrix We now want to create a likelihood matrix that is made up of a Gaussian on each row of the matrix. Each row represents a different trial, with a different stimulus offset (i.e. a different likelihood mean). **Suggestions** Using the equation for the un-normalised Gaussian `my_gaussian`: * Allow the mean of the Gaussian likelihood to vary in 21 steps spaced linearly between from -8 to 8 degree, keeping $\sigma$ of the visual stimuli to 1. * Each likelihood with a different mean will make up a different row-likelihood of your matrix, such that you end up with a likelihood matrix made up of 20 row-Gaussians with different means * Plot the matrix using the function `plot_mymatrix()` already pre-written and commented-out in your script - `plot_mymatrix()` has row 0 at the bottom, and row 20 at the top ###Exercise 4 ``` visual_mean = np.linspace(-8, 8, x.shape[0]-1) visual_sigma = 2 likelihood_matrix = np.zeros_like(prior_matrix) ############################################################################### ## Insert your code here to: ## Create a Gaussian Likelihood with sigma = 1, and mean varying from -8 to 9 in 21 equally spaced steps (use `np.linspace()` function) ## Each of the Gaussian Likelihood with a different mean will make up a different 'trial' and hence a different row of your matrix ## Fill in your matrix with the 20 different Gaussian likelihoods (i.e. 
20 trials) ## Plot the Likelihood Matrix using the function `plt.pcolor` and the code snippet provided below ############################################################################### # Uncomment once the task (steps above) is complete # plot_mymatrix(x, likelihood_matrix, 'Orientation (Degree)', 'Repetitions', 'Likelihood Matrix') ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W2D1_BayesianStatistics/solutions/W2D1_Tutorial2_Solution_e4291715.py) *Example output:* <img alt='Solution hint' align='left' width=598 height=412 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W2D1_BayesianStatistics/static/W2D1_Tutorial2_Solution_e4291715_0.png> <img src="https://github.com/NeuromatchAcademy/course-content/raw/master/tutorials/static/sample_output.png"/> <img width="450px" src="https://github.com/NeuromatchAcademy/course-content/raw/master/tutorials/Bayes/Expected_outputs/Student_BayesDay_Tutorial_2_fig_4.jpg"/> --- ## e. Creating a posterior matrix We now want to create the Posterior matrix. To do so, we will compute the posterior using *Bayes rule* for each trial (i.e. row wise). That is, each row of the posterior matrix will be the posterior resulting from the multiplication of the prior and likelihood of the equivalent row. Mathematically: \begin{eqnarray} Posterior\left[i, :\right] \propto Likelihood\left[i, :\right] \odot Prior\left[i, :\right] \end{eqnarray} where $\odot$ represent the [Hadamard Product](https://en.wikipedia.org/wiki/Hadamard_product_(matrices)) (i.e. the element_wise multiplication) of the Prior and Likelihood row vectors `i` from the matrix. **Suggestions** * For each row (trial) of the Prior and Likelihood matrix, calculate posterior and fill in the Posterior matrix, such that each row of the Posterior matrix represents the posterior for a different trial. * Plot the matrix using the function `plot_mymatrix` already pre-written and commented-out in your script - `plot_mymatrix()` has row 0 at the bottom, and row 20 at the top ###Exercise 5 ``` posterior_matrix = np.zeros_like(likelihood_matrix) ############################################################################### ## Insert your code here to: ## For each row of the Prior & Likelihood Matrices, calculate the resulting posterior ## Fill the Posterior Matrix with the row_posterior ## Plot the Posterior Matrix using the function `plt.pcolor` and the code snippet provided below ############################################################################### # Uncomment once the task (steps above) is complete #plot_mymatrix(x, posterior_matrix, 'Orientation (Degree)', 'Repetitions', 'Posterior Matrix') ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W2D1_BayesianStatistics/solutions/W2D1_Tutorial2_Solution_4e129f5a.py) *Example output:* <img alt='Solution hint' align='left' width=581 height=412 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W2D1_BayesianStatistics/static/W2D1_Tutorial2_Solution_4e129f5a_0.png> <img src="https://github.com/NeuromatchAcademy/course-content/raw/master/tutorials/static/sample_output.png"/> <img width="450px" src="https://github.com/NeuromatchAcademy/course-content/raw/master/tutorials/Bayes/Expected_outputs/Student_BayesDay_Tutorial_2_fig_5.jpg"/> --- ## f. Creating a binary decision matrix The subjects are asked to report one location rather than the whole posterior distribution. 
To do so, we're going to synthesize the posterior distribution to a point estimate (its mode), the point at which the posterior distribution is largest. In this exercise, we now want to create a binary decision matrix. To do so, we will scan the posterior matrix (i.e. row_wise), and set the matrix cell to 1 at the mode (peak) of the row posterior. This, effectively encodes the *decision* that a participant may make on a given trial (i.e. row). In this case, the modelled decision rule is to take the mode of the posterior on each trial (that is, our model makes the assumption that a participant would 'respond' with the mode of their posterior). **Suggestions** * Create a matrix of the same size as the Posterior matrix and fill it with zeros (Hint: use `np.zeros_like()`). * For each row (trial) of the Posterior matrix, calculate the mode of the posterior, and set the corresponding cell of the Binary Decision Matrix to 1. (e.g. if the mode of the posterior is at position 0, then set the cell with x_column == 0 to 1). * Plot the matrix using the function `plot_mymatrix()` already pre-written and commented-out in your script - `plot_mymatrix()` has row 0 at the bottom, and row 20 at the top ###Exercise 6 ``` binary_decision_matrix = np.zeros_like(posterior_matrix) ############################################################################### ## Insert your code here to: ## Create a matrix of the same size as the Posterior matrix and fill it with zeros (Hint: use np.zeros_like()) ## For each row of the Posterior Matrix, calculate the mode of the posterior, and set the corresponding cell of the Binary Decision Matrix to 1. ## Plot the Binary Decision Matrix using the function `plt.pcolor` and the code snippet provided below ############################################################################### # Uncomment once the task (steps above) is complete # plot_mymatrix(x, binary_decision_matrix, 'Orientation (Degree)', 'Repetitions', 'Binary Decision Matrix') ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W2D1_BayesianStatistics/solutions/W2D1_Tutorial2_Solution_e8c610f7.py) *Example output:* <img alt='Solution hint' align='left' width=581 height=412 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W2D1_BayesianStatistics/static/W2D1_Tutorial2_Solution_e8c610f7_0.png> <img src="https://github.com/NeuromatchAcademy/course-content/raw/master/tutorials/static/sample_output.png"/> <img width="450px" src="https://github.com/NeuromatchAcademy/course-content/raw/master/tutorials/Bayes/Expected_outputs/Student_BayesDay_Tutorial_2_fig_6.jpg"/> ``` #@title Video: Outro video = YouTubeVideo(id='YIFGXOsi0_A', width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video ```
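This is not the official solution (use the *Click for solution* links above for that); it is only a compact sketch of the row-wise Bayes computation behind Exercises 5 and 6, assuming `x`, `prior_matrix` and `likelihood_matrix` have already been built as described earlier. The `_sketch` variable names are introduced here for illustration only.

```
# Rough sketch of Exercises 5 and 6 (not the official solution).
# Assumes prior_matrix and likelihood_matrix were filled in the cells above.
posterior_matrix_sketch = np.zeros_like(likelihood_matrix)
binary_decision_matrix_sketch = np.zeros_like(likelihood_matrix)

for i in range(likelihood_matrix.shape[0]):
    # element-wise (Hadamard) product of the row-likelihood and row-prior ...
    row_posterior = likelihood_matrix[i, :] * prior_matrix[i, :]
    # ... normalised so that each row-posterior sums to 1
    posterior_matrix_sketch[i, :] = row_posterior / row_posterior.sum()

    # decision rule: report the mode (peak) of the row-posterior
    mode_index = np.argmax(posterior_matrix_sketch[i, :])
    binary_decision_matrix_sketch[i, mode_index] = 1

# plot_mymatrix(x, posterior_matrix_sketch, 'Orientation (Degree)', 'Repetitions', 'Posterior Matrix')
# plot_mymatrix(x, binary_decision_matrix_sketch, 'Orientation (Degree)', 'Repetitions', 'Binary Decision Matrix')
```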
github_jupyter
## Compute a Monte Carlo integral for any specified function. ``` %matplotlib inline import numpy as np import matplotlib.pyplot as plt import math ``` Riofa-Gean Fernandez ID: 1396498 ``` N = 500 # Number of points a = 0 #x-axis min to replace b = 1.75 #x-axis max to replace def f(x): return np.cos(x) #function to replace x = np.arange(a,b,0.01) #(start, stop, step interval) y = f(x) #function d = max(y) #y-axis maximum c = min(y) #y-axis minimum #compute the number of random points x_rand = a + (b - a)*np.random.random(N) y_rand = np.random.random(N)*d ind_below = np.where(y_rand < f(x_rand)) #points below the function ind_above = np.where(y_rand >= f(x_rand)) #points above the function #plot the function pts_below = plt.scatter(x_rand[ind_below], y_rand[ind_below], label = "Points below function", color = "green") pts_above = plt.scatter(x_rand[ind_above], y_rand[ind_above], label = "Points above function", color = "blue") plt.plot(x, y, label = "Function", color = "red") plt.legend(loc = 'lower center', ncol = 2) int_answer_1 = len(ind_below[0])/(N)*((b-a)*(d-c)) #first integral estimate (By R. Fernandez and S. Yuen) #print the answer print ("Number of points above the function:", len(ind_above[0])) print ("Number of points below the function:", len(ind_below[0])) print ("Fraction of points below the function:", int_answer_1) #By S. Yuen ``` Sierra Yuen ID: 1495259 ``` N = 10000 #number of points a2 = 0 #x-axis minimum b2 = 1.75 #x-axis maximum def f(x): return np.cos(x) #function to replace x = np.arange(a2,b2,0.01) #(start,stop,step interval) y = f(x) #function d2 = max(y) #y-axis maximum c2 = min(y) #y-axis minimum #compute the number of random points x_rand = a2 + (b2 - a2)*np.random.random(N) y_rand = np.random.random(N)*d2 ind_below = np.where(y_rand < f(x_rand)) #points below the function ind_above = np.where(y_rand >= f(x_rand)) #points above the function #plot the function pts_below = plt.scatter(x_rand[ind_below], y_rand[ind_below], label = "Dots below function", color = "green") pts_above = plt.scatter(x_rand[ind_above], y_rand[ind_above], label = "Dots above function", color = "blue") plt.plot(x, y, label = "Function", color = "red") plt.legend(loc = 'lower center', ncol = 2) int_answer_2 = len(ind_below[0])/(N)*((b2-a2)*(d2-c2)) #second integral estimate (By R. Fernandez and S. Yuen) #print the answer print ("Number of points above the function:", len(ind_above[0])) print ("Number of points below the function:", len(ind_below[0])) print ("Fraction of points below the function:", int_answer_2) #specify a tolerance for the integration tolerance = int_answer_2 - int_answer_1 #print the tolerance print(tolerance) ```
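The two hit-or-miss estimates above can be cross-checked with the sample-mean Monte Carlo estimator, which approximates the integral as (b - a) * mean(f(x)) for uniformly drawn x. The sketch below reuses the limits a = 0, b = 1.75 and f(x) = cos(x) from the cells above; the analytic value sin(b) - sin(a) used for comparison is specific to this choice of f.

```
# Cross-check with the sample-mean Monte Carlo estimator:
# integral of f over [a, b]  ~  (b - a) * mean(f(x)),  x ~ Uniform(a, b)
def mc_integral(f, a, b, N):
    x_rand = a + (b - a)*np.random.random(N)
    return (b - a)*np.mean(f(x_rand))

a = 0
b = 1.75
exact = np.sin(b) - np.sin(a)   # analytic value, valid for f(x) = cos(x) only

for N in (500, 10000, 1000000):
    estimate = mc_integral(np.cos, a, b, N)
    print("N =", N, "estimate =", estimate, "error =", abs(estimate - exact))
```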
github_jupyter
``` import pandas as pd import numpy as np import os import matplotlib.pylab as plt import seaborn as sns np.random.seed(500) types_names = {90:'Ia', 67: '91bg', 52:'Iax', 42:'II', 62:'Ibc', 95: 'SLSN', 15:'TDE', 64:'KN', 88:'AGN', 92:'RRL', 65:'M-dwarf', 16:'EB',53:'Mira', 6:'MicroL', 991:'MicroLB', 992:'ILOT', 993:'CART', 994:'PISN',995:'MLString'} SNANA_types = {90:11, 62:{1:3, 2:13}, 42:{1:2, 2:12, 3:14}, 67:41, 52:43, 64:51, 95:60, 994:61, 992:62, 993:63, 15:64, 88:70, 92:80, 65:81, 16:83, 53:84, 991:90, 6:{1:91, 2:93}} SNANA_names = {11: 'Ia', 3:'Ibc', 13: 'Ibc', 2:'II', 12:'II', 14:'II', 41: '91bg', 43:'Iax', 51:'KN', 60:'SLSN', 61:'PISN', 62:'ILOT', 63:'CART', 64:'TDE', 70:'AGN', 80:'RRL', 81:'M-dwarf', 83:'EB', 84:'Mira', 90:'MicroLB', 91:'MicroL', 93:'MicroL'} fname = '/media/RESSPECT/data/PLAsTiCC/PLAsTiCC_zenodo/plasticc_test_metadata.csv' test_metadata = pd.read_csv(fname) ddf_flag = test_metadata['ddf_bool'].values == 1 ids_ddf = test_metadata['object_id'].values[ddf_flag] ids_wfd = test_metadata['object_id'].values[~ddf_flag] ``` # Check how are the Ias in DDF in comparison with WFD ``` fnames_Ia = os.listdir('Ia/results/') fnames_Ia.remove('master_fitres.fitres') fnames_Ia.remove('salt3') fnames_Ia.remove('.ipynb_checkpoints') salt2_wfd = [] for name in fnames_Ia: fitres_temp = pd.read_csv('Ia/results/' + name, delim_whitespace=True, comment='#') salt2_wfd.append(fitres_temp) salt2_Ia_wfd = pd.concat(salt2_wfd, ignore_index=True) salt2_Ia_ddf = pd.read_csv('Ia/results/master_fitres.fitres', comment='#', delim_whitespace=True) salt2_Ia_ddf['x1 - SIM_x1'] = salt2_Ia_ddf['x1'] - salt2_Ia_ddf['SIM_x1'] salt2_Ia_ddf['c - SIM_c'] = salt2_Ia_ddf['c'] - salt2_Ia_ddf['SIM_c'] salt2_Ia_ddf['x0 - SIM_x0'] = salt2_Ia_ddf['x0'] - salt2_Ia_ddf['SIM_x0'] salt2_Ia_ddf['mB - SIM_mB'] = salt2_Ia_ddf['mB'] - salt2_Ia_ddf['SIM_mB'] salt2_Ia_wfd['x1 - SIM_x1'] = salt2_Ia_wfd['x1'] - salt2_Ia_wfd['SIM_x1'] salt2_Ia_wfd['c - SIM_c'] = salt2_Ia_wfd['c'] - salt2_Ia_wfd['SIM_c'] salt2_Ia_wfd['mB - SIM_mB'] = salt2_Ia_wfd['mB'] - salt2_Ia_wfd['SIM_mB'] salt2_Ia_wfd['x0 - SIM_x0'] = salt2_Ia_wfd['x0'] - salt2_Ia_wfd['SIM_x0'] plt.figure(figsize=(24,10)) ax1 = plt.subplot(2,3,1) sns.distplot(salt2_Ia_ddf['x1'], label='DDF', ax=ax1) sns.distplot(salt2_Ia_wfd['x1'], label='WFD', ax=ax1) plt.legend() ax2 = plt.subplot(2,3,2) sns.distplot(salt2_Ia_ddf['c'], label='DDF', ax=ax2) sns.distplot(salt2_Ia_wfd['c'], label='WFD', ax=ax2) plt.legend() ax3 = plt.subplot(2,3,3) sns.distplot(salt2_Ia_ddf['mB'], label='DDF', ax=ax3) sns.distplot(salt2_Ia_wfd['mB'], label='WFD', ax=ax3) plt.legend() ax4 = plt.subplot(2,3,4) sns.distplot(salt2_Ia_ddf['x1 - SIM_x1'], label='DDF', ax=ax4) sns.distplot(salt2_Ia_wfd['x1 - SIM_x1'], label='WFD', ax=ax4) plt.legend() ax5 = plt.subplot(2,3,5) sns.distplot(salt2_Ia_ddf['c - SIM_c'], label='DDF', ax=ax5) sns.distplot(salt2_Ia_wfd['c - SIM_c'], label='WFD', ax=ax5) plt.legend() ax6 = plt.subplot(2,3,6) sns.distplot(salt2_Ia_ddf['mB - SIM_mB'], label='DDF', ax=ax6) sns.distplot(salt2_Ia_wfd['mB - SIM_mB'], label='WFD', ax=ax6) plt.legend() #plt.savefig('plots/SALT2_params_DDF_WFD.png') ``` ## Create perfect sample WFD ``` nobjs = 3000 for j in range(1, 6): perfect_sample = salt2_Ia_wfd.sample(n=nobjs, replace=False) perfect_sample['zHD'] = perfect_sample['SIM_ZCMB'] perfect_sample.fillna(value=-99, inplace=True) perfect_sample.to_csv('WFD' + str(j) + '/perfect' + str(nobjs) + '.csv', index=False) perfect_sample.to_csv('/media/RESSPECT/data/PLAsTiCC/for_metrics/wfd/emille_samples' + 
str(j) + '/perfect' + str(nobjs) + '.csv', index=False) del perfect_sample ``` # Calculate populations - WFD ``` types_names = {90: 'Ia', 67: '91bg', 52:'Iax', 42:'II', 62:'Ibc', 95: 'SLSN', 15:'TDE', 64:'KN', 88:'AGN', 92:'RRL', 65:'M-dwarf', 16:'EB',53:'Mira', 6:'MicroL', 991:'MicroLB', 992:'ILOT', 993:'CART', 994:'PISN',995:'MLString'} SNANA_names = {11: 'Ia', 3:'Ibc', 13: 'Ibc', 2:'II', 12:'II', 14:'II', 41: '91bg', 43:'Iax', 51:'KN', 60:'SLSN', 61:'PISN', 62:'ILOT', 63:'CART', 64:'TDE', 70:'AGN', 80:'RRL', 81:'M-dwarf', 83:'EB', 84:'Mira', 90:'MicroLB', 91:'MicroL', 93:'MicroL'} groups, freq = np.unique(test_metadata[~ddf_flag]['true_target'].values, return_counts=True) tot_wfd = sum(~ddf_flag) print('Type \t\t Total number \t %') for i in range(len(groups)): if types_names[groups[i]] in ['M-dwarf', 'MicroLB']: print(i, ' --- ', types_names[groups[i]], '\t', freq[i], '\t\t', round(100*freq[i]/tot_wfd, 3)) else: print(i, ' -- ', types_names[groups[i]], '\t\t', freq[i], '\t\t', round(100*freq[i]/tot_wfd, 3)) ``` # Populations for a sample with 3000 SNIa ``` data_all_wfd2 = pd.read_csv('/media/RESSPECT/data/PLAsTiCC/for_metrics/wfd/emille_samples/all_WFD.csv', index_col=False) ``` # Random ``` for j in range(1, 6): d1 = data_all_wfd2.sample(n=3000, replace=False) d1.to_csv('WFD' + str(j) + '/perfect' + str(nobjs) + '.csv', index=False) print(d1.iloc[0]) d1.to_csv('/media/RESSPECT/data/PLAsTiCC/for_metrics/wfd/emille_samples' + str(j) + '/random' + str(nobjs) + '.csv', index=False) del d1 nIa = freq[11] nIa_sample = 3000 fitres_types, fitres_freq = np.unique(data_all_wfd2['SIM_TYPE_INDEX'].values, return_counts=True) mock = [] for i in range(len(groups)): n_objs = int(nIa_sample * freq[i]/nIa) print(n_objs, ' --- ', types_names[groups[i]], ' --- ', SNANA_types[groups[i]]) if n_objs > 0: if isinstance(SNANA_types[groups[i]], int) and SNANA_types[groups[i]] in fitres_types: print('***', types_names[groups[i]], ' --- ', n_objs) snana_type = SNANA_types[groups[i]] flag = data_all_wfd2['SIM_TYPE_INDEX'].values == snana_type data_partial = data_all_wfd2[flag] data_partial2 = data_partial.sample(n=n_objs, replace=False) mock.append(data_partial2) elif isinstance(SNANA_types[groups[i]], dict) and len(SNANA_types[groups[i]]) == 3: print('***', types_names[groups[i]], ' --- ', n_objs) f1 = np.logical_or(data_all_wfd2['SIM_TYPE_INDEX'].values == 2, data_all_wfd2['SIM_TYPE_INDEX'].values == 12) f2 = np.logical_or(data_all_wfd2['SIM_TYPE_INDEX'].values == 14, f1) data_partial = data_all_wfd2[f2] data_partial2 = data_partial.sample(n=n_objs, replace=False) mock.append(data_partial2) elif isinstance(SNANA_types[groups[i]], dict) and len(SNANA_types[groups[i]]) == 2: print('***', types_names[groups[i]], ' --- ', n_objs) flag = np.logical_or(data_all_wfd2['SIM_TYPE_INDEX'].values == 3, data_all_wfd2['SIM_TYPE_INDEX'].values == 13) data_partial = data_all_wfd2[flag] data_partial2 = data_partial.sample(n=n_objs, replace=False) mock.append(data_partial2) mock2 = pd.concat(mock, ignore_index=True) mock2.fillna(value=-99, inplace=True) mock2['zHD'] = mock2['SIM_ZCMB'] def classification_metrics(cont): """Classification metrics for a sample of 3k SNIa. Parameters ---------- cont: float \in [0, 1] Percentage of contamination. 
Returns ------- accuracy: float efficiency: float purity: float figure of merit (W=1): float figure of merit (W=3): float """ totIa = 3000 ntotal = 5588 acc = (ntotal - (2* totIa * cont))/5588 eff = (totIa - totIa * cont)/3000 f1 = ((totIa - 3000 * cont)/3000) * (1 - cont) f3 = ((1 - cont) * totIa)/(((1-cont) * totIa) + 3 * ((cont) * totIa)) return acc, eff, 1 - cont, f1, f3 classification_metrics(0.02) ``` # Single contaminant ``` c1 = [[72, 'II'], [75, 'Iax'], [75, 'II'], [90, 'Iax'], [90, 'Ibc'], [90, 'II'], [95, 'AGN'], [95, '91bg'], [95, 'Iax'], [95, 'Ibc'], [95, 'II'], [98, 'AGN'], [98, '91bg'], [98, 'Iax'], [98, 'Ibc'], [98, 'II'], [99.6, 'TDE'], [99.7, 'CART'], [99, 'AGN'], [99, 'SLSN'], [99, '91bg'], [99, 'Iax'], [99, 'Ibc'], [99, 'II']] k = 5 for i in range(len(c1)): fname_salt2 = os.listdir(c1[i][1] + '/results/') if '.ipynb_checkpoints' in fname_salt2: fname_salt2.remove('.ipynb_checkpoints') fname_salt2.remove('salt3') fname_salt2.remove('master_fitres.fitres') nobjs = round(0.01* (100 - c1[i][0]) * 3000) print('nobjs = ', nobjs) salt2_wfd = [] for name in fname_salt2: try: fitres_temp = pd.read_csv(c1[i][1] + '/results/' + name, delim_whitespace=True, comment='#') salt2_wfd.append(fitres_temp) except: pass salt2_wfd = pd.concat(salt2_wfd, ignore_index=True) types, counts = np.unique(salt2_wfd['SIM_TYPE_INDEX'].values, return_counts=True) print('salt2_wfd.shape = ', salt2_wfd.shape) print('types = ', types) print('counts = ', counts) salt2_sample = salt2_wfd.sample(n=nobjs, replace=False) fnames_Ia = os.listdir('Ia/results/') fnames_Ia.remove('master_fitres.fitres') fnames_Ia.remove('salt3') fnames_Ia.remove('.ipynb_checkpoints') salt2_wfd = [] for name in fnames_Ia: fitres_temp = pd.read_csv('Ia/results/' + name, delim_whitespace=True, comment='#') salt2_wfd.append(fitres_temp) salt2_Ia_wfd = pd.concat(salt2_wfd, ignore_index=True) types, counts = np.unique(salt2_Ia_wfd['SIM_TYPE_INDEX'].values, return_counts=True) print('types = ', types) print('counts = ', counts) salt2_Ia_sample = salt2_Ia_wfd.sample(n=3000-nobjs, replace=False) final_sample = pd.concat([salt2_Ia_sample, salt2_sample], ignore_index=True) final_sample['zHD'] = final_sample['SIM_ZCMB'] final_sample.fillna(value=-99, inplace=True) print('final_sample.shape = ', final_sample.shape) if c1[i][1] in ['AGN', 'TDE', 'SLSN', 'CART', '91bg']: cont = c1[i][1] elif c1[i][1] == '91bg': cont = 'SNIa-91bg' else: cont = 'SN' + c1[i][1] fname = 'WFD' + str(k) + '/' + str(c1[i][0]) + 'SNIa' + str(round(100 - c1[i][0], 1)) + cont + '.csv' fname2 = '/media/RESSPECT/data/PLAsTiCC/for_metrics/wfd/emille_samples' + str(k) + '/' + str(c1[i][0]) + \ 'SNIa' + str(round(100 - c1[i][0], 1)) + cont + '.csv' print('fname = ', fname) print('fname2= ', fname2) final_sample.to_csv(fname, index=False) final_sample.to_csv(fname2, index=False) del final_sample del cont ``` # DDF ## Perfect ``` k = 5 np.random.seed(k) salt2_Ia_ddf = pd.read_csv('Ia/results/master_fitres.fitres', comment='#', delim_whitespace=True) types, counts = np.unique(salt2_Ia_ddf['SIM_TYPE_INDEX'].values, return_counts=True) zflag = salt2_Ia_ddf['SIM_ZCMB'].values <= 1 data = salt2_Ia_ddf[zflag] print(types) print(counts) nobjs = 3000 final_sample = data.sample(n=nobjs, replace=False) final_sample['zHD'] = final_sample['SIM_ZCMB'] final_sample.fillna(value=-99, inplace=True) final_sample.to_csv('DDF' + str(k)+ '/perfect' + str(nobjs) + '.csv') final_sample.to_csv('/media/RESSPECT/data/PLAsTiCC/for_metrics/ddf/emille_samples' + str(k) + \ '/perfect' + str(nobjs) + '.csv') 
c2 = [[72, 'II'], [75, 'II'],[86, 'Iax'], [90, 'Iax'], [90, 'II'], [91, 'Iax'], [92,'Ibc'], [95, 'Iax'], [95, 'Ibc'], [95,'II'], [98, 'Iax'], [98, 'Ibc'], [98, 'II'], [99.1,'CART'], [99.8, '91bg'], [99.9, 'AGN'], [99.9, 'SLSN'], [99, 'Iax'], [99, 'Ibc'], [99, 'II']] k = 1 np.random.seed(k) for i in range(len(c2)): if c2[i][1] not in ['AGN', 'SLSN', 'CART', '91bg']: cont = 'SN' + c2[i][1] elif c2[i][1] == '91bg': cont = 'SNIa-91bg' else: cont = c2[i][1] salt2_ddf = pd.read_csv(c2[i][1] + '/results/master_fitres.fitres', comment='#', delim_whitespace=True) types, counts = np.unique(salt2_ddf['SIM_TYPE_INDEX'].values, return_counts=True) print(types) print(counts) nobjs = round(0.01* (100 - c2[i][0]) * 3000) print(nobjs) salt2_ddf_sample = salt2_ddf.sample(n=nobjs, replace=False) salt2_Ia_ddf = pd.read_csv('Ia/results/master_fitres.fitres', comment='#', delim_whitespace=True) types, counts = np.unique(salt2_Ia_ddf['SIM_TYPE_INDEX'].values, return_counts=True) salt2_Ia_sample = salt2_Ia_ddf.sample(n=3000-nobjs, replace=False) final_sample = pd.concat([salt2_Ia_sample, salt2_ddf_sample], ignore_index=True) final_sample['zHD'] = final_sample['SIM_ZCMB'] final_sample.fillna(value=-99, inplace=True) fname2 = 'DDF' + str(k) + '/' + str(c2[i][0]) + 'SNIa' + str(round(100 - c2[i][0], 1)) + cont + '.csv' print(fname2) final_sample.to_csv(fname2, index=False) fname3 = '/media/RESSPECT/data/PLAsTiCC/for_metrics/ddf/emille_samples' + str(k) + '/' + \ str(c2[i][0]) + 'SNIa' + str(round(100 - c2[i][0], 1)) + cont + '.csv' ddd = pd.read_csv('DDF1/86SNIa14SNIax.csv', index_col=False) sum(ddd['SIM_TYPE_INDEX'].values == 11)/3000 ``` # Make list with all DDF surviving SALT2 ``` import os import pandas as pd import numpy as np fnames = os.listdir('.') fnames.remove('make_samples.ipynb') fnames.remove('summary.ipynb') fnames.remove('.ipynb_checkpoints') fnames.remove('WFD') fnames.remove('DDF') fnames.remove('DDF_Alex') fnames.remove('plots') fnames.remove('WFD_Alex') all_fitres = [] for name in fnames: try: data = pd.read_csv(name + '/results/master_fitres.fitres', comment='#', delim_whitespace=True) data.fillna(value=-99, inplace=True) data['zHD'] = data['SIM_ZCMB'] all_fitres.append(data) except: pass all_fitres = pd.concat(all_fitres, ignore_index=True) all_fitres.fillna(value=-99, inplace=True) types = np.array([SNANA_names[item] for item in all_fitres['SIM_TYPE_INDEX'].values]) all_fitres['types_names'] = types all_fitres2 = {} all_fitres2['id'] = all_fitres['CID'].values all_fitres2['redshift'] = all_fitres['SIM_ZCMB'].values all_fitres2['type'] = [SNANA_names[item] for item in all_fitres['SIM_TYPE_INDEX'].values] all_fitres2['code'] = all_fitres['SIM_TYPE_INDEX'].values all_fitres2['orig_sample'] = ['test' for i in range(all_fitres.shape[0])] all_fitres2['querayble'] = [True for i in range(all_fitres.shape[0])] all_fitres3 = pd.DataFrame(all_fitres2) all_fitres.to_csv('/media/RESSPECT/data/PLAsTiCC/for_metrics/ddf/emille_samples/all_DDF.csv', index=False) all_fitres all_fitres = pd.read_csv('/media/RESSPECT/data/PLAsTiCC/for_metrics/ddf/emille_samples/all_DDF.csv', index_col=False) for i in range(1,6): np.random.seed(i) d1 = all_fitres.sample(n=3000, replace=False) d1.to_csv('DDF' + str(i) + '/random3000.csv', index=False) d1.to_csv('/media/RESSPECT/data/PLAsTiCC/for_metrics/ddf/emille_samples' + str(i) + '/random3000.csv', index=False) del d1 ``` # Make list with all WFD surviving SALT2 ``` fnames = os.listdir('.') fnames.remove('make_samples.ipynb') fnames.remove('summary.ipynb') 
fnames.remove('.ipynb_checkpoints') fnames.remove('WFD') fnames.remove('DDF') fnames.remove('DDF_Alex') fnames.remove('WFD_Alex') fnames.remove('plots') all_wfd = [] data_all_wfd = [] for name in fnames: flist = os.listdir(name + '/results/') flist.remove('master_fitres.fitres') flist.remove('salt3') for elem in flist: try: data = pd.read_csv(name + '/results/' + elem, comment='#', delim_whitespace=True) data['zHD'] = data['SIM_ZCMB'] data.fillna(value=-99, inplace=True) data_all_wfd.append(data) dtemp = {} dtemp['id'] = data['CID'].values dtemp['redshift'] = data['SIM_ZCMB'].values dtemp['type'] = [SNANA_names[i] for i in data['SIM_TYPE_INDEX'].values] dtemp['code'] = data['SIM_TYPE_INDEX'].values dtemp['orig_sample'] = ['test' for i in range(data.shape[0])] dtemp['queryable'] = [True for i in range(data.shape[0])] dtemp = pd.DataFrame(dtemp) all_wfd.append(dtemp) except: pass all_fitres_wfd = pd.concat(all_wfd, ignore_index=True) data_all_wfd2 = pd.concat(data_all_wfd, ignore_index=True) data_all_wfd2.fillna(value=-99, inplace=True) data_all_wfd2.append(data) types_wfd = np.array([SNANA_names[item] for item in data_all_wfd2['SIM_TYPE_INDEX'].values]) data_all_wfd2['types_names'] = types_wfd data_all_wfd2.to_csv('/media/RESSPECT/data/PLAsTiCC/for_metrics/wfd/emille_samples/all_WFD.csv', index=False) all_fitres_wfd.to_csv('/media/RESSPECT/data/PLAsTiCC/for_metrics/wfd/samples/all_objs_survived_SALT2_WFD.csv', index=False) all_fitres_wfd = pd.read_csv('/media/RESSPECT/data/PLAsTiCC/for_metrics/wfd/emille_samples/all_WFD.csv', index_col=False) for i in range(1,6): np.random.seed(i) d1 = all_fitres_wfd.sample(n=3000, replace=False) d1.to_csv('WFD' + str(i) + '/random3000.csv', index=False) d1.to_csv('/media/RESSPECT/data/PLAsTiCC/for_metrics/wfd/emille_samples' + str(i) + '/random.csv', index=False) del d1 types = [SNANA_names[item] for item in all_fitres_wfd['SIM_TYPE_INDEX'].values] all_fitres_wfd['types_names'] = types all_fitres_wfd = all_fitres_wfd.to_csv('/media/RESSPECT/data/PLAsTiCC/for_metrics/wfd/emille_samples/all_WFD.csv', index=False) ``` # plots ``` import matplotlib.pylab as plt import seaborn as sns types = np.array([SNANA_names[item] for item in all_fitres['SIM_TYPE_INDEX'].values]) sntype, freq = np.unique(types, return_counts=True) plt.pie(freq, labels=sntype, autopct='%1.1f%%', shadow=True, startangle=140) plt.axis('equal') plt.show() types_wfd = np.array([SNANA_names[item] for item in data_all_wfd2['SIM_TYPE_INDEX'].values]) sntype, freq = np.unique(types_wfd, return_counts=True) plt.pie(freq, labels=sntype, autopct='%1.1f%%', shadow=True, startangle=140) plt.axis('equal') plt.show() ``` # Kyle results - DDF ``` fname_ddf = '/media/kara/resspect_metric/workspace/kyle_boone_ddf.csv' kyle_ddf = pd.read_csv(fname_ddf, names=['object_id','6','15','16','42','52','53','62','64','65','67','88', '90','92','95'], skiprows=1) class_final = [] for i in range(kyle_ddf.shape[0]): indx = np.argsort(kyle_ddf.iloc[i].values[1:])[-1] code = int(kyle_ddf.keys()[indx + 1]) class_final.append(types_names[code]) class_final = np.array(class_final) flag_class_Ia = class_final == 'Ia' kyle_ddf_Ia = kyle_ddf[flag_class_Ia] k = 5 np.random.seed(k) kyle_ddf_sample = kyle_ddf_Ia.sample(n=3000, replace=False) fitres_ddf_flag = np.array([item in kyle_ddf_sample['object_id'].values for item in all_fitres['CID'].values]) sum(fitres_ddf_flag) kyle_fitres_ddf = all_fitres[fitres_ddf_flag] ids, freq = np.unique(kyle_fitres_ddf['CID'].values, return_counts=True) sum(freq > 1 ) 
kyle_fitres_ddf.to_csv('/media/RESSPECT/data/PLAsTiCC/for_metrics/ddf/emille_samples' + str(k) + '/fiducial3000.csv', index=False) kyle_fitres_ddf.to_csv('/media/emille/git/COIN/RESSPECT_work/PLAsTiCC/metrics_paper/resspect_metric/SALT2_fit/DDF' + str(k) + '/fiducial3000.csv', index=False) sum(kyle_fitres_ddf['SIM_TYPE_INDEX'].values == 11) ``` # Kyle results - WFD ``` np.random.seed(750) fname_wfd = '/media/kara/resspect_metric/workspace/kyle_boone_wfd.csv' kyle_wfd = pd.read_csv(fname_wfd, names=['object_id','6','15','16','42','52','53','62','64','65','67','88', '90','92','95'], skiprows=1) class_final = [] for i in range(kyle_wfd.shape[0]): indx = np.argsort(kyle_wfd.iloc[i].values[1:])[-1] code = int(kyle_wfd.keys()[indx + 1]) class_final.append(types_names[code]) class_final = np.array(class_final) flag_class_Ia = class_final == 'Ia' kyle_wfd_Ia = kyle_wfd[flag_class_Ia] kyle_wfd_sample = kyle_wfd_Ia.sample(n=3000, replace=False) fitres_wfd_flag = np.array([item in kyle_wfd_sample['object_id'].values for item in data_all_wfd2['CID'].values]) sum(fitres_wfd_flag) kyle_fitres_wfd = data_all_wfd2[fitres_wfd_flag] kyle_fitres_wfd2 = kyle_fitres_wfd.drop_duplicates(subset=['CID'], keep='first') ids, freq = np.unique(kyle_fitres_wfd2['CID'].values, return_counts=True) sum(freq > 1 ) k = 5 kyle_fitres_wfd2.to_csv('/media/RESSPECT/data/PLAsTiCC/for_metrics/wfd/emille_samples' + str(k) + '/fiducial3000.csv', index=False) kyle_fitres_wfd2.to_csv('/media/emille/git/COIN/RESSPECT_work/PLAsTiCC/metrics_paper/resspect_metric/SALT2_fit/WFD' + str(k) + '/fiducial3000.csv', index=False) sum(kyle_fitres_wfd2['SIM_TYPE_INDEX'].values == 11)/3000 ```
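The cells above repeatedly check the SNIa fraction of a sample with expressions like `sum(d['SIM_TYPE_INDEX'].values == 11)/3000`. A small helper along those lines is sketched below; it assumes the file was written by this notebook and therefore keeps the `SIM_TYPE_INDEX` column, and the `check_sample` name and `nexpected` default are introduced here purely for illustration.

```
# Sanity check for any of the generated sample files: SNIa fraction
# (SNANA code 11) plus a breakdown of the contaminant types.
def check_sample(fname, nexpected=3000):
    sample = pd.read_csv(fname, index_col=False)
    codes, counts = np.unique(sample['SIM_TYPE_INDEX'].values, return_counts=True)
    ia_frac = sum(sample['SIM_TYPE_INDEX'].values == 11)/sample.shape[0]
    print(fname)
    print('  total objects :', sample.shape[0], '(expected', nexpected, ')')
    print('  SNIa fraction :', round(ia_frac, 4))
    for code, count in zip(codes, counts):
        print('   ', SNANA_names.get(code, code), ':', count)
    return ia_frac

# e.g. check_sample('DDF1/86SNIa14SNIax.csv')
```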
github_jupyter
## PmodOLED Example ## Contents * [Introduction](#Introduction) * [Setup the board and PmodOLED](#Setup-the-board-and-PmodOLED,-and-load-the-overlay) * [Write to the PmodOLED](#Write-to-the-PmodOLED) * [Draw some patterns](#Draw-some-patterns) * [Create a new Python function](#Create-a-new-Python-function) * [Putting it all together](#Putting-it-all-together) ---- ## Introduction This demonstration shows how to use the [PmodOLED](https://reference.digilentinc.com/reference/pmod/pmodoled/start) with the PYNQ-Z1 or PYNQ-Z2 board. ---- ## Setup the board and PmodOLED, and load the overlay ### Connect the PmodOLED to the board. In this example the ***PmodOLED*** should be connected to ***PMODA.*** Download the base overlay ``` from pynq.overlays.base import BaseOverlay base = BaseOverlay("base.bit") ``` Create an OLED instance ``` from pynq.lib.pmod import Pmod_OLED # Connect to PMODA pmod_oled = Pmod_OLED(base.PMODA) ``` ## Write to the PmodOLED ``` pmod_oled.clear() pmod_oled.write(' Welcome\n to\n PYNQ!') ``` #### You should now see the text output on the OLED. Try another message: ``` pmod_oled.clear() pmod_oled.write('Python and Zynq\nProductivity & performance') ``` Clear the display when finished. ``` pmod_oled.clear() ``` System information can be captured and stored in Python variables, and written to the peripheral. ``` hostname = !hostname #Get primary IP address ip_addr = !hostname -I | cut -f1 -d' ' #Get CPU architecture cpu = !cat /proc/cpuinfo | grep "model name" | head -n 1 | cut -f3 -d' ' pmod_oled.write(hostname[0] + "\nIP:" + ip_addr[0] + '\nCPU: ' + cpu[0]) pmod_oled.clear() ``` ---- ## Draw some patterns The PmodOLED includes some built-in functions running in C code on the IOP. For drawing lines and rectangles, the `draw_line()` and `draw_rect()` functions are provided. The OLED display area is 128 pixels x 32 pixels. ### Draw a line A line can be drawn by specifying two co-ordinates: pmod_oled.draw_line(x1, y1, x2, y2) You can execute the next cell, or change the co-ordinates and execute the cell below to draw another line. `pmod_oled.clear()` should be called to clear the display if you do not want lines drawn on top of previous lines. If the bitstream is reloaded the display will also be cleared. ``` pmod_oled.draw_line(0,0,128,32) pmod_oled.draw_line(0,32,128,0) pmod_oled.clear() pmod_oled.draw_line(64,0,64,32) ``` Clear the display when finished. ``` pmod_oled.clear() ``` ### Draw a rectangle You can draw a rectangle in a similar way by specifying two co-ordinates: pmod_oled.draw_rect(x1, y1, x2, y2). This will draw a rectangle using the two points as opposite corners. ``` pmod_oled.draw_rect(60,5,80,25) pmod_oled.draw_rect(105,0,120,28) ``` Clear the display when finished. ``` pmod_oled.clear() ``` ---- ## Create a new Python function More functions could be implemented in the C code running on the IOP to generate other patterns. The existing functions can also be extended in Python to add more functionality. The following cell defines a function to draw circles on the PmodOLED. ``` import math # Draw a circle # Screen resolution is 128x32 def draw_circle(cx,cy, r): for i in range (0, 360): x = cx + r * math.cos(i*math.pi/180) if x > 127: x = 127 if x < 0: x = 0 y = cy + r * math.sin(i*math.pi/180) if y > 31: y = 31 if y < 0: y = 0 pmod_oled.draw_line(int(x),int(y),int(x+1),int(y)) ``` ### Draw the circle You can draw a circle by using the function which has just been created, and by specifying a co-ordinate and the radius. 
``` pmod_oled.clear() draw_circle(64,16,15) ``` Remember the display is 128x32 pixels. If the circle exceeds the display area it will be clipped. ``` pmod_oled.clear() draw_circle(64,32,15) ``` Additional functionality can be added easily in Python, but note that functions in Python will be slower than using the C functions running directly on the IOP. (In this case, the circle co-ordinates are calculated in Python, and the IOP draw_line() is called 360 times which is much slower than simply drawing a single line using the draw_line() function.) ---- ## Putting it all together Draw some patterns ``` pmod_oled.clear() pmod_oled.draw_line(0,0,128,32) pmod_oled.draw_rect(60,5,80,25) pmod_oled.draw_rect(105,0,120,28) draw_circle(16,16,16) pmod_oled.clear() for i in range (0,9): draw_circle(16,16,i*2) for i in range (0,6): draw_circle(48,16,1+i*3) for i in range (0,5): draw_circle(80,16,i*4) for i in range (0,4): draw_circle(111,16,1+i*5) ```
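The `draw_circle()` approach generalises to other shapes. The sketch below adds a hypothetical `draw_polygon()` helper, built entirely from the existing `pmod_oled.draw_line()` calls and using the same 128x32 clipping as above; the function name and example coordinates are illustrative choices.

```
# Another Python extension in the spirit of draw_circle(): join a list of
# (x, y) points with draw_line() calls, closing the shape at the end.
def draw_polygon(points):
    n = len(points)
    for i in range(n):
        x1, y1 = points[i]
        x2, y2 = points[(i + 1) % n]   # wrap around to close the polygon
        # clip to the 128x32 display area
        x1, x2 = max(0, min(127, x1)), max(0, min(127, x2))
        y1, y2 = max(0, min(31, y1)), max(0, min(31, y2))
        pmod_oled.draw_line(int(x1), int(y1), int(x2), int(y2))

pmod_oled.clear()
draw_polygon([(10, 2), (40, 2), (25, 30)])                # triangle
draw_polygon([(60, 5), (90, 5), (100, 28), (50, 28)])     # quadrilateral
# pmod_oled.clear()   # clear the display when finished
```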
github_jupyter
# A Brief Intro to pydeck pydeck is made for visualizing data points in 2D or 3D maps. Specifically, it handles - rendering large (>1M points) data sets, like LIDAR point clouds or GPS pings - large-scale updates to data points, like plotting points with motion - making beautiful maps Under the hood, it's powered by the [deck.gl](https://github.com/uber/deck.gl/) JavaScript framework. pydeck is strongest when used in tandem with [Pandas](https://pandas.pydata.org/) but doesn't have to be. Please note that **these demo notebooks are best when executed cell-by-cell**, so ideally clone this repo. ``` import pydeck as pdk ``` # There are three steps for most pydeck visualizations We'll walk through pydeck using a visualization of vehicle accident data in the United Kingdom. ## 1. Choose your data Here, we'll use the history of accident data throughout the United Kingdom. This data set presents the location of every latitude and longitude of car accidents in the UK in 2014 ([source](https://data.gov.uk/dataset/053a6529-6c8c-42ac-ae1e-455b2708e535/road-traffic-accidents)). ``` import pandas as pd UK_ACCIDENTS_DATA = 'https://raw.githubusercontent.com/uber-common/deck.gl-data/master/examples/3d-heatmap/heatmap-data.csv' pd.read_csv(UK_ACCIDENTS_DATA).head() ``` ## 2. Configure the visualization: Choose your layer(s) and viewport pydeck's **`Layer`** object takes two positional and many keyword arguments: - First, a string specifying the layer type, with our example below using `'HexagonLayer'` - Next, a data URL–below you'll see the `UK_ACCIDENTS_DATA` that we set above, but we could alternately pass a data frame or list of dictionaries - Finally, keywords representing that layer's attributes–in our example, this would include `elevation_scale`, `elevation_range`, `extruded`, `coverage`. ```python layer = pdk.Layer( 'HexagonLayer', UK_ACCIDENTS_DATA, elevation_scale=50, elevation_range=[0, 3000], extruded=True, coverage=1) ``` There is of course an entire catalog of layers which you're welcome to check out within the [deck.gl documentation](https://deck.gl/#/documentation/deckgl-api-reference/layers/overview). ### Configure your viewport We also have to specifiy a **`ViewState`** object. The **`ViewState`** object specifies a camera angle relative to the map data. If you don't want to manually specify it, the function **`pydeck.data_utils.autocompute_viewport`** can take your data and automatically zoom to it. pydeck also provides some controls, most of which should be familiar from map applications throughout the web. By default, you can hold out and drag to rotate the map. ``` layer = pdk.Layer( 'HexagonLayer', UK_ACCIDENTS_DATA, elevation_scale=50, elevation_range=[0, 3000], extruded=True, coverage=1) # Set the viewport location view_state = pdk.ViewState( longitude=-1.415, latitude=52.2323, zoom=6, min_zoom=5, max_zoom=15, pitch=40.5, bearing=-27.396) # Combined all of it and render a viewport r = pdk.Deck(layers=[layer], initial_view_state=view_state) r.show() ``` ## Render an update to the visualization Execute the cell below and look at the map in the cell above–you'll notice a seamless rendered update on the map ``` layer.elevation_range = [0, 5000] r.update() ``` ## Support updates over time We can combine any Python function with our work here, of course. Execute the cell below to update our map above over time. ``` import time for i in range(0, 10000, 1000): layer.elevation_range = [0, i] r.update() time.sleep(0.1) ```
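Other layer types only require different `Layer` arguments. A sketch with a `ScatterplotLayer` is shown below; the `lng`/`lat` column names are assumed from the deck.gl example CSV (check the `head()` output above if they differ), and the radius and colour values are illustrative choices rather than required settings.

```
# A sketch of the same data rendered with a ScatterplotLayer.
# 'lng' and 'lat' are assumed to be the column names in the example CSV.
scatter_layer = pdk.Layer(
    'ScatterplotLayer',
    UK_ACCIDENTS_DATA,
    get_position='[lng, lat]',
    get_radius=500,                # metres -- purely illustrative
    get_fill_color=[255, 70, 0],
    pickable=True)

scatter_view = pdk.ViewState(
    longitude=-1.415,
    latitude=52.2323,
    zoom=6,
    pitch=0)

r2 = pdk.Deck(layers=[scatter_layer], initial_view_state=scatter_view)
r2.show()
```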
github_jupyter
# Tutorial 4: Scattering calculations with Tully's models ``` import sys import cmath import math import os import time import h5py import matplotlib.pyplot as plt # plots import numpy as np #from matplotlib.mlab import griddata %matplotlib inline if sys.platform=="cygwin": from cyglibra_core import * elif sys.platform=="linux" or sys.platform=="linux2": from liblibra_core import * from libra_py import units import libra_py.models.Tully as Tully from libra_py import tsh from libra_py import tsh_stat from libra_py import data_conv from libra_py import data_savers from libra_py import dynamics_plotting #from libra_py import dynamics_exact import util.libutil as comn import libra_py.dynamics.exact.compute as compute import libra_py.dynamics.exact.save as save import libra_py.dynamics.exact.plot as plot plt.rc('axes', titlesize=24) # fontsize of the axes title plt.rc('axes', labelsize=20) # fontsize of the x and y labels plt.rc('legend', fontsize=20) # legend fontsize plt.rc('xtick', labelsize=16) # fontsize of the tick labels plt.rc('ytick', labelsize=16) # fontsize of the tick labels plt.rc('figure.subplot', left=0.2) plt.rc('figure.subplot', right=0.95) plt.rc('figure.subplot', bottom=0.13) plt.rc('figure.subplot', top=0.88) colors = {} colors.update({"11": "#8b1a0e"}) # red colors.update({"12": "#FF4500"}) # orangered colors.update({"13": "#B22222"}) # firebrick colors.update({"14": "#DC143C"}) # crimson colors.update({"21": "#5e9c36"}) # green colors.update({"22": "#006400"}) # darkgreen colors.update({"23": "#228B22"}) # forestgreen colors.update({"24": "#808000"}) # olive colors.update({"31": "#8A2BE2"}) # blueviolet colors.update({"32": "#00008B"}) # darkblue colors.update({"41": "#2F4F4F"}) # darkslategray clrs_index = ["11", "21", "31", "41", "12", "22", "32", "13","23", "14", "24"] ``` ## 1. Define the model & plot the PES ``` def compute_model(q, params, full_id): model = params["model"] res = None if model==1: res = Tully.Tully1(q, params) elif model==2: res = Tully.Tully2(q, params) elif model==3: res = Tully.Tully3(q, params) return res def potential(q, params): """ Thin wrapper of the model Hamiltonians that can be used in the fully-quantum calculations """ # Diabatic properties obj = compute_model(q, params, Py2Cpp_int([0,0])) # Adiabatic properties nadi = len(params["E_n"]) ndof = 1 ham = nHamiltonian(nadi, nadi, ndof) # ndia, nadi, nnucl ham.init_all(2) ham.compute_diabatic(compute_model, q, params) ham.compute_adiabatic(1); obj.ham_adi = ham.get_ham_adi() obj.dc1_adi = CMATRIXList() for n in range(ndof): x = ham.get_dc1_adi(n) for i in range(nadi): for j in range(nadi): if i!=j: #pass if math.fabs(x.get(i,j).real)>1e+10: x.set(i,j, 0.0+0.0j) x.set(j,i, 0.0+0.0j) obj.dc1_adi.append( x ) return obj param_sets = [ {"model":1, "E_n":[0.0, 0.0], "nstates":2 }, {"model":2, "E_n":[0.0, 0.0], "nstates":2 }, {"model":3, "E_n":[0.0, 0.0], "nstates":2 } ] plot_params = {"colors": colors, "clrs_index": clrs_index, "xlim":[-15, 15], "ylim":[-0.015, 0.015 ]} dynamics_plotting.plot_surfaces(compute_model, [ param_sets[0] ], [0,1], -15.0, 15.0, 0.05, plot_params) ``` ## 2. 
Run the calculations ``` model_params = dict(param_sets[0]) properties_to_save = [ "timestep", "time", "Ekin_dia", "Ekin_adi", "Epot_dia", "Epot_adi", "Etot_dia", "Etot_adi", "norm_dia", "norm_adi", "pop_dia", "pop_adi", "q_dia", "q_adi", "q2_dia", "q2_adi", "p_dia", "p_adi", "p2_dia", "p2_adi", "denmat_dia", "denmat_adi", "custom_pops", "PSI_dia", "PSI_adi", "reciPSI_dia", "reciPSI_adi" ] params = { "nsteps":200, "dt":10.0, "progress_frequency":0.1, "rmin":[-35.0], "rmax":[35.0], "dx":[0.1], "nstates":2, "x0":[-10.0], "p0":[20.0], "istate":[1,0], "masses":[2000.0], "k":[0.001], "integrator":"SOFT", "prefix":"Tut4-1", "hdf5_output_level":3, "compression_level":[0,0,0], "use_compression":0, "mem_output_level":3, "txt_output_level":0, "properties_to_save": properties_to_save, "custom_pops":[ [0, [-40], [-5]], [0, [-5], [5]], [0, [5], [40]], [1, [-40], [-5]], [1, [-5], [5]], [1, [5], [40]] ] } params1 = dict(params) params1.update({ "prefix":"Tut4-1" }) res = compute.run_relaxation( params1, potential, model_params ) ``` ## 3. Plot the results ``` with h5py.File("Tut4-1/data.hdf", 'r') as f: t = list(f["time/data"][:]) print(t) #print(list(f["boxed_pops/0/data"][:, 0, 0])) print(list(f["custom_pops/data"][:, 0, 0, 0])) print(list(f["pop_adi/data"][:, 0, 0])) plot_params = {"prefix":"Tut4-1", "filename":"mem_data.hdf", "hdf5_output_level":2, "which_dofs":[0], "which_adi_states":[0, 1], "which_dia_states":[0, 1], "properties_to_save": [ "timestep", "time", "Ekin_dia", "Ekin_adi", "Epot_dia", "Epot_adi", "Etot_dia", "Etot_adi", "norm_dia", "norm_adi", "pop_dia", "pop_adi", "q_dia", "q_adi", "q2_dia", "q2_adi", "p_dia", "p_adi", "p2_dia", "p2_adi", "denmat_dia", "denmat_adi", "custom_pops", "PSI_dia", "PSI_adi", "reciPSI_dia", "reciPSI_adi" ] } plot.plot_hdf5(plot_params) def plot_custom_pops(plot_params): """ This function is meant to plot the results stored in the hdf files generated by the exact dynamics runs Args: prefix ( string ): the name of the directory containing the input HDF5 file This directory will also be used to output the generated picture files [ default : "out"] filename ( string ): name of the HDF5 file to read [ default: "data.hdf"] output_level ( int ): the level of info contained in the HDF5 file [ default : 3] which_adi_states ( list of ints ) : indices of the adiabatic states to print [ default: [0] ] which_dia_states ( list of ints ) : indices of the diabatic states to print [ default: [0] ] colors ( dictionary ): the definition of the colors to use clrs_index ( list of strings ) : defines the mapping of the colors on integers and vice versa """ plt.rc('axes', titlesize=24) # fontsize of the axes title plt.rc('axes', labelsize=20) # fontsize of the x and y labels plt.rc('legend', fontsize=20) # legend fontsize plt.rc('xtick', labelsize=16) # fontsize of the tick labels plt.rc('ytick', labelsize=16) # fontsize of the tick labels plt.rc('figure.subplot', left=0.2) plt.rc('figure.subplot', right=0.95) plt.rc('figure.subplot', bottom=0.13) plt.rc('figure.subplot', top=0.88) colors = {} colors.update({"11": "#8b1a0e"}) # red colors.update({"12": "#FF4500"}) # orangered colors.update({"13": "#B22222"}) # firebrick colors.update({"14": "#DC143C"}) # crimson colors.update({"21": "#5e9c36"}) # green colors.update({"22": "#006400"}) # darkgreen colors.update({"23": "#228B22"}) # forestgreen colors.update({"24": "#808000"}) # olive colors.update({"31": "#8A2BE2"}) # blueviolet colors.update({"32": "#00008B"}) # darkblue colors.update({"41": "#2F4F4F"}) # darkslategray clrs_index = 
["11", "21", "31", "41", "12", "22", "32", "13","23", "14", "24"] # Parameters and dimensions critical_params = [ ] default_params = { "prefix":"out", "filename":"data.hdf", "hdf5_output_level":2, "colors":colors, "clrs_index":clrs_index, "figs":[] } comn.check_input(plot_params, default_params, critical_params) filename = plot_params["filename"] prefix = plot_params["prefix"] hdf5_output_level = plot_params["hdf5_output_level"] colors = plot_params["colors"] clrs_index = plot_params["clrs_index"] figs = plot_params["figs"] out_prefix = prefix with h5py.File(F"{prefix}/{filename}", 'r') as f: t = None if "time" in properties_to_save: t = list(f["time/data"][:]) #=============== Populations ====================== if t != None: nfigs = len(figs) for ifig in range(nfigs): plt.figure(ifig, figsize=(12, 12)) # dpi=300, frameon=False) plt.subplot(1, 1, 1) #plt.ylim(0, 1) plt.title(F'{figs[ifig][0]}' ) plt.xlabel('Time, a.u.') plt.ylabel('Population') nlines = len(figs[ifig]) for i in range(1, nlines): line_label = figs[ifig][i][0] pop_type = figs[ifig][i][1] istate = figs[ifig][i][2] line_color_index = figs[ifig][i][3] clr = colors[clrs_index[ line_color_index ]] Pi = list(f["custom_pops/data"][:, pop_type, istate, 0]) plt.plot(t, Pi, label=F'{line_label}', linewidth=10, color = clr) plt.legend() plt.savefig(F"{prefix}/Custom_pops_{i-1}.png", dpi=300) plt.savefig(F"{prefix}/Custom_pops_{i-1}.pdf", dpi=300) plt.show() plt.close() _plot_params = { "prefix":"Tut4-1", "filename":"mem_data.hdf", "hdf5_output_level":2, "colors":colors, "clrs_index":clrs_index, "figs":[ [ "Diabatic pops", ["reflection on the lower state", 0, 0, 0], ["unreacted on the lower state", 1, 0, 1], ["transmission on the lower state", 2, 0, 2] ], [ "Diabatic pops", ["reflection on the upper state", 0, 1, 0], ["unreacted on the upper state", 1, 1, 1], ["transmission on the upper state", 2, 1, 2] ], [ "Adiabatic pops", ["reflection on the lower state", 3, 0, 0], ["unreacted on the lower state", 4, 0, 1], ["transmission on the lower state", 5, 0, 2] ], [ "Adiabatic pops", ["reflection on the upper state", 3, 1, 0], ["unreacted on the upper state", 4, 1, 1], ["transmission on the upper state", 5, 1, 2] ] ] } plot_custom_pops(_plot_params) ``` ## Scattering probabilities Now, lets repeat the calculations many times, with different initial momenta and save all the results in different folders ``` prefix = "Tut4-2" model_params = dict(param_sets[0]) properties_to_save = [ "timestep", "time", "Ekin_dia", "Ekin_adi", "Epot_dia", "Epot_adi", "Etot_dia", "Etot_adi", "norm_dia", "norm_adi", "pop_dia", "pop_adi", "q_dia", "q_adi", "q2_dia", "q2_adi", "p_dia", "p_adi", "p2_dia", "p2_adi", "denmat_dia", "denmat_adi", "custom_pops", "PSI_dia", "PSI_adi", "reciPSI_dia", "reciPSI_adi" ] params = { "nsteps":200, "dt":10.0, "progress_frequency":0.1, "rmin":[-35.0], "rmax":[35.0], "dx":[0.1], "nstates":2, "x0":[-10.0], "p0":[20.0], "istate":[1,0], "masses":[2000.0], "k":[0.001], "integrator":"SOFT", "prefix":"Tut4-2", "hdf5_output_level":0, "compression_level":[0,0,0], "use_compression":0, "mem_output_level":3, "txt_output_level":0, "properties_to_save": properties_to_save, "custom_pops":[ [0, [-40], [-5]], [0, [-5], [5]], [0, [5], [40]], [1, [-40], [-5]], [1, [-5], [5]], [1, [5], [40]] ] } if not os.path.isdir(prefix): os.mkdir(prefix) P0 = [5.0, 6.0, 7.0, 8.0, 10.0, 12.0, 13.0, 15.0, 18.0, 20.0, 25.0, 30.0, 35.0, 40.0, 45.0, 50.0] ninit = len(P0) for i in range(ninit): print(F"=============== initial momentum {P0[i]} ============") if not 
os.path.isdir(F"{prefix}/{i}"): os.mkdir(F"{prefix}/{i}") params1 = dict(params) params1.update({"prefix": F"{prefix}/{i}", "p0":[P0[i] ], "nsteps":int(200 * (200.0/P0[i])) }) compute.run_relaxation( params1, potential, model_params ) P0 = [ 5.0, 6.0, 7.0, 8.0, 10.0, 12.0, 13.0, 15.0, 18.0, 20.0, 25.0, 30.0, 35.0, 40.0, 45.0, 50.0] ninit = len(P0) plt.figure(1, figsize=(48, 12)) # dpi=300, frameon=False) plt.subplot(1, 3, 1) plt.title("Unreacted pop") plt.xlabel('Time, a.u.') plt.ylabel('Population') for i in [7]: #range(ninit): nclrs = len(clrs_index) clr = colors[clrs_index[ i % nclrs]] with h5py.File(F"Tut4-2/{i}/mem_data.hdf", 'r') as f: t = list(f["time/data"][:]) p0 = list(f["custom_pops/data"][:, 4, 0, 0]) # adiabatic not reacted, on state 0 p1 = list(f["custom_pops/data"][:, 4, 1, 0]) # adiabatic not reacted, on state 1 p_unreact = [] sz = len(p0) for j in range(sz): p_unreact.append(p0[j] + p1[j]) #print(F" === init cond = {i} ===") #print(p) plt.plot(t, p_unreact, label=F'{i}', linewidth=10, color = clr) plt.legend() plt.subplot(1, 3, 2) plt.title("Reflected pop") plt.xlabel('Time, a.u.') plt.ylabel('Population') for i in [7]: #range(ninit): nclrs = len(clrs_index) clr = colors[clrs_index[ i % nclrs]] with h5py.File(F"Tut4-2/{i}/mem_data.hdf", 'r') as f: t = list(f["time/data"][:]) p0 = list(f["custom_pops/data"][:, 3, 0, 0]) # adiabatic not reacted, on state 0 p1 = list(f["custom_pops/data"][:, 3, 1, 0]) # adiabatic not reacted, on state 1 p_refl = [] sz = len(p0) for j in range(sz): p_refl.append(p0[j] + p1[j]) #print(F" === init cond = {i} ===") #print(p) plt.plot(t, p_refl, label=F'{i}', linewidth=10, color = clr) plt.legend() ```
github_jupyter
``` import numpy as np import pandas as pd from sklearn.metrics import roc_auc_score import tensorflow as tf from sklearn.feature_extraction.text import CountVectorizer from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from keras.models import Sequential from keras.layers import Dense, Embedding, LSTM, SpatialDropout1D from sklearn.model_selection import train_test_split from keras.utils.np_utils import to_categorical import re from livelossplot import PlotLossesKeras import sys, os, re, csv, codecs, numpy as np, pandas as pd from keras.layers import GRU from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation from keras.layers import Bidirectional, GlobalMaxPool1D from keras.models import Model from keras import initializers, regularizers, constraints, optimizers, layers from keras.layers import Input, Dense,multiply from keras.layers.core import * from keras.layers.recurrent import LSTM from keras.models import * import attention_utils data = pd.read_csv('Sentiment.csv.zip') data = data[['text','sentiment']] data = data[data.sentiment != "Neutral"] data['text'] = data['text'].apply(lambda x: x.lower()) data['text'] = data['text'].apply((lambda x: re.sub('[^a-zA-z0-9\s]','',x))) print('pozitive size', data[ data['sentiment'] == 'Positive'].size) print('negative size', data[ data['sentiment'] == 'Negative'].size) for idx,row in data.iterrows(): row[0] = row[0].replace('rt',' ') max_features = 2000 tokenizer = Tokenizer(num_words=max_features, split=' ') tokenizer.fit_on_texts(data['text'].values) X = tokenizer.texts_to_sequences(data['text'].values) X = pad_sequences(X) def as_keras_metric(method): import functools from keras import backend as K import tensorflow as tf @functools.wraps(method) def wrapper(self, args, **kwargs): """ Wrapper for turning tensorflow metrics into keras metrics """ value, update_op = method(self, args, **kwargs) K.get_session().run(tf.local_variables_initializer()) with tf.control_dependencies([update_op]): value = tf.identity(value) return value return wrapper auc_roc = as_keras_metric(tf.metrics.auc) recall = as_keras_metric(tf.metrics.recall) embed_dim = 128 lstm_out = 196 model = Sequential() model.add(Embedding(max_features, embed_dim,input_length = X.shape[1])) model.add(SpatialDropout1D(0.5)) model.add(LSTM(lstm_out, dropout=0.2, recurrent_dropout=0.2)) model.add(Dense(2,activation='softmax')) model.compile(loss = 'categorical_crossentropy', optimizer='adam',metrics = ['accuracy', auc_roc]) print(model.summary()) Y = pd.get_dummies(data['sentiment']).values X_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size = 0.33, random_state = 42) print(X_train.shape,Y_train.shape) print(X_test.shape,Y_test.shape) callbacks = [ PlotLossesKeras()] batch_size = 128 model.fit(X_train, Y_train, validation_data=(X_test, Y_test), epochs = 10, batch_size=batch_size, callbacks = callbacks) scores = model.evaluate(X_test, Y_test, verbose=1) print('Test loss:', scores[0]) print('Test accuracy:', scores[1]) # Bidirectional LSTM embed_dim = 128 lstm_out = 196 model = Sequential() model.add(Embedding(max_features, embed_dim,input_length = X.shape[1])) model.add(SpatialDropout1D(0.5)) model.add(Bidirectional(LSTM(lstm_out, dropout=0.2, recurrent_dropout=0.2))) model.add(Dense(2,activation='softmax')) model.compile(loss = 'categorical_crossentropy', optimizer='adam',metrics = ['accuracy', auc_roc]) 
batch_size = 64 model.fit(X_train, Y_train, validation_data=(X_test, Y_test), epochs = 10, batch_size=128, callbacks = callbacks) scores = model.evaluate(X_test, Y_test, verbose=1) print('Test loss:', scores[0]) print('Test accuracy:', scores[1]) # Bidirectional LSTM embed_dim = 128 lstm_out = 196 model = Sequential() model.add(Embedding(max_features, embed_dim,input_length = X.shape[1])) model.add(SpatialDropout1D(0.4)) model.add(Bidirectional(GRU(lstm_out, dropout=0.2, recurrent_dropout=0.2))) model.add(Dense(2,activation='softmax')) model.compile(loss = 'categorical_crossentropy', optimizer='adam',metrics = ['accuracy', auc_roc]) batch_size = 64 model.fit(X_train, Y_train, validation_data=(X_test, Y_test), epochs = 10, batch_size=128, callbacks = callbacks) scores = model.evaluate(X_test, Y_test, verbose=1) print('Test loss:', scores[0]) print('Test accuracy:', scores[1]) X.shape[1] MAX_SEQUENCE_LENGTH = X.shape[1] embedding_layer = Embedding(max_features, embed_dim, input_length = X.shape[1]) lstm_layer = LSTM(lstm_out, dropout=0.2, recurrent_dropout=0.2,return_sequences=True) comment_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32') embedded_sequences= embedding_layer(comment_input) x = lstm_layer(embedded_sequences) x = Dropout(0.2)(x) merged = Attention(MAX_SEQUENCE_LENGTH)(x) merged = Dense(2, activation='relu')(merged) merged = Dropout(0.4)(merged) #merged = BatchNormalization()(merged) preds = Dense(2, activation='sigmoid')(merged) model = Model(inputs=[comment_input], \ outputs=preds) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy', auc_roc]) print(model.summary()) model.fit(X_train, Y_train, validation_data=(X_test, Y_test), epochs = 10, batch_size=64, callbacks = callbacks) scores = model.evaluate(X_test, Y_test, verbose=1) print('Test loss:', scores[0]) print('Test accuracy:', scores[1]) class Attention(Layer): def __init__(self, step_dim, W_regularizer=None, b_regularizer=None, W_constraint=None, b_constraint=None, bias=True, **kwargs): """ Keras Layer that implements an Attention mechanism for temporal data. Supports Masking. Follows the work of Raffel et al. [https://arxiv.org/abs/1512.08756] # Input shape 3D tensor with shape: `(samples, steps, features)`. # Output shape 2D tensor with shape: `(samples, features)`. :param kwargs: Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True. The dimensions are inferred based on the output shape of the RNN. 
Example: model.add(LSTM(64, return_sequences=True)) model.add(Attention()) """ self.supports_masking = True #self.init = initializations.get('glorot_uniform') self.init = initializers.get('glorot_uniform') self.W_regularizer = regularizers.get(W_regularizer) self.b_regularizer = regularizers.get(b_regularizer) self.W_constraint = constraints.get(W_constraint) self.b_constraint = constraints.get(b_constraint) self.bias = bias self.step_dim = step_dim self.features_dim = 0 super(Attention, self).__init__(**kwargs) def build(self, input_shape): assert len(input_shape) == 3 self.W = self.add_weight((input_shape[-1],), initializer=self.init, name='{}_W'.format(self.name), regularizer=self.W_regularizer, constraint=self.W_constraint) self.features_dim = input_shape[-1] if self.bias: self.b = self.add_weight((input_shape[1],), initializer='zero', name='{}_b'.format(self.name), regularizer=self.b_regularizer, constraint=self.b_constraint) else: self.b = None self.built = True def compute_mask(self, input, input_mask=None): # do not pass the mask to the next layers return None def call(self, x, mask=None): # eij = K.dot(x, self.W) TF backend doesn't support it # features_dim = self.W.shape[0] # step_dim = x._keras_shape[1] features_dim = self.features_dim step_dim = self.step_dim eij = K.reshape(K.dot(K.reshape(x, (-1, features_dim)), K.reshape(self.W, (features_dim, 1))), (-1, step_dim)) if self.bias: eij += self.b eij = K.tanh(eij) a = K.exp(eij) # apply mask after the exp. will be re-normalized next if mask is not None: # Cast the mask to floatX to avoid float64 upcasting in theano a *= K.cast(mask, K.floatx()) # in some cases especially in the early stages of training the sum may be almost zero a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx()) a = K.expand_dims(a) weighted_input = x * a #print weigthted_input.shape return K.sum(weighted_input, axis=1) def compute_output_shape(self, input_shape): #return input_shape[0], input_shape[-1] return input_shape[0], self.features_dim ```
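Any of the fitted models above can be used to score new text, as long as the same tokenizer, cleaning and padding length are reused. The `predict_sentiment` helper below is only a sketch: it assumes the fitted `tokenizer`, a trained `model` and `X` from the cells above, and relies on `pd.get_dummies` ordering the label columns alphabetically (index 0 = Negative, index 1 = Positive).

```
# Scoring new text with the fitted tokenizer and a trained model.
# pd.get_dummies sorts columns alphabetically, so column 0 = Negative, 1 = Positive.
def predict_sentiment(texts, model, tokenizer, maxlen=X.shape[1]):
    cleaned = [re.sub('[^a-zA-z0-9\s]', '', t.lower()) for t in texts]
    seqs = tokenizer.texts_to_sequences(cleaned)
    padded = pad_sequences(seqs, maxlen=maxlen)
    probs = model.predict(padded)
    labels = ['Negative' if p[0] > p[1] else 'Positive' for p in probs]
    return list(zip(texts, labels, probs))

predict_sentiment(["what a great debate tonight",
                   "this was a complete disaster"], model, tokenizer)
```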
github_jupyter
# Training fine flow prediction Assuming source image $I_s$ and target image $I_t$ are already coarsely aligned, this notebook will try to predict a fine flow $F_{s\rightarrow t}$ between them. TODO describe objective functions used in this project ``` %load_ext autoreload %autoreload 2 ``` We assume you already have a zipped dataset in `data` folder. ``` %cd workspace !ln -s ../data/MegaDepth_cleansed.zip MegaDepth_cleansed.zip ``` If you are working in Google Colab, you might find this cell useful. It performs 0. Sanity check if you are using Google Colab 1. Mount Google Drive. 2. Assume you have a folder `RANSAC-Flow` that is equivalent to this repository, which contains `data` folder. 3. Copy the dataset to `/tmp` ``` # 0. try: import google.colab IN_COLAB = True except ModuleNotFoundError: IN_COLAB = False finally: if not IN_COLAB: raise RuntimeError('not running on Google Colab') # 1. from google.colab import drive drive.mount('/content/drive') # 2. %cd /content/drive/MyDrive/RANSAC-Flow # 3. !rsync -ah --progress data/MegaDepth_cleansed.zip /tmp !ln -s /tmp/MegaDepth_cleansed.zip MegaDepth_cleansed.zip ``` Import packages that we will use throughout this notebook. ``` from pytorch_lightning import Trainer from pytorch_lightning.callbacks import EarlyStopping from pytorch_lightning.loggers import TensorBoardLogger ``` We enable logging here to make debug easier. ``` import logging logging.basicConfig( level=logging.INFO, format="[%(asctime)s] %(name)s :: %(levelname)s :: %(message)s", handlers=[logging.StreamHandler()], ) # fine tune submodules log level logging.getLogger("ransacflow").setLevel(logging.WARNING) logging.getLogger('ransacflow.data').setLevel(logging.WARNING) ``` ## Prepare dataset We already pack some datasets used in the original paper as `LightningDataModule`. We will import it here. ``` from ransacflow.data import MegaDepthDataModule mega_depth = MegaDepthDataModule( "MegaDepth_cleansed.zip", train_image_size=224, train_batch_size=2, val_image_size=480, ) #TODO add some sanity check for the dataset here, previews # TODO setup environments for the following training sessions, how? # FIXME is it possible to share the Trainer object across all 3 stages ``` ## Stage 1 Only train the **reconstruction loss**. It is based on the idea that source image $I_s$ warped with the predicted flow $F_{s\rightarrow t}$ should align well with the target image $I_t$. In the original work, they use the structural similarity (SSIM) as the perception model. $$ L_{\text{recon}}\left(I_s, I_t\right) = \sum_{(x,y)\in I_t} M_t^{\text{cycle}}(x,y) \left( 1 - \text{SSIM}\left\lbrace I_s(x^\prime, y^\prime), I_t(x,y) \right\rbrace \right) $$ FIXME wtf is M_t doing here? 
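The actual loss is implemented inside `ransacflow.train.RANSACFlowModelStage1`; the snippet below is only an illustration of the reconstruction loss formula above, using a pooling-based SSIM approximation with a mask acting as per-pixel weights. The window size, constants and the assumption that images lie in [0, 1] are illustrative choices, not values taken from the package.

```
# Illustration of the masked (1 - SSIM) reconstruction loss above -- not the
# implementation used by ransacflow.train.RANSACFlowModelStage1.
import torch
import torch.nn.functional as F

def masked_ssim_recon_loss(warped_src, target, mask, window=11, C1=0.01**2, C2=0.03**2):
    # local statistics from an average-pooling window (images assumed in [0, 1])
    pad = window // 2
    mu_x = F.avg_pool2d(warped_src, window, stride=1, padding=pad)
    mu_y = F.avg_pool2d(target, window, stride=1, padding=pad)
    sigma_x = F.avg_pool2d(warped_src*warped_src, window, stride=1, padding=pad) - mu_x**2
    sigma_y = F.avg_pool2d(target*target, window, stride=1, padding=pad) - mu_y**2
    sigma_xy = F.avg_pool2d(warped_src*target, window, stride=1, padding=pad) - mu_x*mu_y

    ssim = ((2*mu_x*mu_y + C1)*(2*sigma_xy + C2)) / ((mu_x**2 + mu_y**2 + C1)*(sigma_x + sigma_y + C2))
    dissim = (1.0 - ssim).clamp(0, 2).mean(dim=1, keepdim=True)   # per-pixel (1 - SSIM)

    # the mask plays the role of M_t: per-pixel weights on the dissimilarity
    return (mask * dissim).sum() / (mask.sum() + 1e-8)

# shape check with dummy tensors: (batch, channels, H, W) images, (batch, 1, H, W) mask
loss = masked_ssim_recon_loss(torch.rand(2, 3, 224, 224),
                              torch.rand(2, 3, 224, 224),
                              torch.ones(2, 1, 224, 224))
```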
``` # DEBUG somehow dataset needs to reload everythime for correct coords from ransacflow.data import MegaDepthDataModule mega_depth = MegaDepthDataModule( "MegaDepth_cleansed.zip", train_image_size=224, train_batch_size=4, val_image_size=480, num_workers=8 ) ## parameter names log_dir = "MegaDepth_logs" ## from ransacflow.train import RANSACFlowModelStage1 ransac_flow = RANSACFlowModelStage1( alpha=0, beta=0, gamma=0, kernel_size=7, ssim_window_size=11, lr=2e-4, ) # FIXME unify TB logging location and experiment name trainer = Trainer( gpus=-1, fast_dev_run=False, max_epochs=200, val_check_interval=0.25, logger=TensorBoardLogger(log_dir, name="stage1"), callbacks=[EarlyStopping(monitor="val_loss", min_delta=0.01, patience=3)], ) trainer.fit(ransac_flow, mega_depth) ``` All following command line interface are copied from the original implementation, temporarily. ``` --nEpochs 200 --lr 2e-4 --kernelSize 7 --imgSize 224 --batchSize 16 --lambda-match 0.0, alpha --mu-cycle 0.0, beta --grad 0.0, gamma --trainMode flow --margin 88 ``` ## Stage 2 Train jointly the **reconstruction loss** and **cycle consistency of the flow**. Asides from the reconstruction loss mentioned in previous stage, we start to enforce cycle consistency of the flow by $$ L_{\text{cycle}} = \sum_{(x,y) \in I_t} M_t^{\text{circle}} (x,y) \left\lVert \left(x^\prime, y^\prime \right), \bm{F}_{t\rightarrow s}(x,y) \right\rVert_2 $$ FIXME what happened with (x^\prime, y^\prime), F_{t->s}? Are they multiplied? ``` from ransacflow.train import RANSACFlowModelStage2 ransac_flow = RANSACFlowModelStage2(alpha=0, beta=1, gamma=0, kernel_size=7, lr=2e-4) # FIXME unify TB logging location and experiment name trainer = Trainer( max_epochs=50, logger=TensorBoardLogger("tb_logs", name="RANSAC-Flow_stage2"), callbacks=[EarlyStoppping(monitor="val_loss", min_delta=0.01, patience=3)], ) trainer.fit(ransac_flow, MegaDepthDataModule) --nEpochs 50 --lr 2e-4 --kernelSize 7 --imgSize 224 --batchSize 16 --lambda-match 0.0, alpha --mu-cycle 1.0, beta --grad 0.0, gamma --trainMode flow --margin 88 ``` ## Stage 3 Train all three losses together: **reconstruction loss**, **cycle consistency of the flow**, and **matchability loss**. Matchability mask can be seen as pixel-wise weights for the reconstruction and cycle consistency loss. These losses encourage th matchability to be zero. To counteract this effect, the matchability loss encourages the matchability mask to be close to one. FIXME equation for matchability FIXME still doesn't understand what matchability actually implies, what is the difference between this and cycle loss? ``` from ransacflow.train import RANSACFlowModelStage3 ransac_flow = RANSACFlowModelStage3(alpha=0.01, beta=1, gamma=0, kernel_size=7, lr=2e-4) # FIXME unify TB logging location and experiment name trainer = Trainer( max_epochs=50, logger=TensorBoardLogger("tb_logs", name="RANSAC-Flow_stage3"), callbacks=[EarlyStoppping(monitor="val_loss", min_delta=0.01, patience=3)], ) trainer.fit(ransac_flow, MegaDepthDataModule) --nEpochs 50 --lr 2e-4 --kernelSize 7 --imgSize 224 --batchSize 16 --lambda-match 0.01, alpha --mu-cycle 1.0, beta --grad 0.0, gamma --trainMode flow+match --margin 88 ``` ## Stage 4.1 This additional stage fine tune on SOMETHING MAGICAL, so the output image introduce less distortions. TODO need to update description from the original paper ## Stage 4.2 This additional stage uses perceptual loss, TODO add description about why and how to use perceptual loss
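The perceptual loss itself is not spelled out here yet (see the TODOs above). Purely as an illustration of the general idea, the sketch below compares frozen VGG feature activations of the warped source and the target instead of raw pixels; it is a generic formulation, not the one used by `ransacflow`, and the layer cut-off, the L1 distance and the expectation of ImageNet-normalised inputs are arbitrary assumptions.

```
# Generic illustration of a perceptual loss (not the ransacflow implementation):
# compare frozen VGG-16 feature maps of the warped source and the target.
import torch
import torch.nn.functional as F
import torchvision

class PerceptualLoss(torch.nn.Module):
    def __init__(self, layer_cutoff=16):
        super().__init__()
        vgg = torchvision.models.vgg16(pretrained=True).features[:layer_cutoff]
        for p in vgg.parameters():
            p.requires_grad = False          # features stay frozen
        self.features = vgg.eval()

    def forward(self, warped_src, target):
        # inputs are expected to be ImageNet-normalised (assumption)
        return F.l1_loss(self.features(warped_src), self.features(target))
```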
github_jupyter
# Model-Based RL In this exercise you will implement a policy and model network which work in tandem to solve the CartPole reinforcement learning problem. What is a model and why would we want to use one? In this case, a model is going to be a neural network that attempts to learn the dynamics of the real environment. For example, in the CartPole we would like a model to be able to predict the next position of the Cart given the previous position and an action. By learning an accurate model, we can train our agent using the model rather than requiring to use the real environment every time. While this may seem less useful when the real environment is itself a simulation, like in our CartPole task, it can have huge advantages when attempting to learn policies for acting in the physical world. How are we going to accomplish this in Tensorflow? We are going to be using a neural network that will learn the transition dynamics between a previous observation and action, and the expected new observation, reward, and done state. Our training procedure will involve switching between training our model using the real environment, and training our agent’s policy using the model environment. By using this approach we will be able to learn a policy that allows our agent to solve the CartPole task without actually ever training the policy on the real environment! ### Loading libraries and starting CartPole environment ``` from __future__ import print_function import numpy as np try: import cPickle as pickle except: import pickle import tensorflow as tf %matplotlib inline import matplotlib.pyplot as plt import math import sys if sys.version_info.major > 2: xrange = range del sys import gym env = gym.make('CartPole-v0') ``` ### Setting Hyper-parameters ``` # hyperparameters H = 8 # number of hidden layer neurons learning_rate = 1e-2 gamma = 0.99 # discount factor for reward decay_rate = 0.99 # decay factor for RMSProp leaky sum of grad^2 resume = False # resume from previous checkpoint? model_bs = 3 # Batch size when learning from model real_bs = 3 # Batch size when learning from real environment # model initialization D = 4 # input dimensionality ``` ### Policy Network ``` tf.reset_default_graph() observations = tf.placeholder(tf.float32, [None,4] , name="input_x") W1 = tf.get_variable("W1", shape=[4, H], initializer=tf.contrib.layers.xavier_initializer()) layer1 = tf.nn.relu(tf.matmul(observations,W1)) W2 = tf.get_variable("W2", shape=[H, 1], initializer=tf.contrib.layers.xavier_initializer()) score = tf.matmul(layer1,W2) probability = tf.nn.sigmoid(score) tvars = tf.trainable_variables() input_y = tf.placeholder(tf.float32,[None,1], name="input_y") advantages = tf.placeholder(tf.float32,name="reward_signal") adam = tf.train.AdamOptimizer(learning_rate=learning_rate) W1Grad = tf.placeholder(tf.float32,name="batch_grad1") W2Grad = tf.placeholder(tf.float32,name="batch_grad2") batchGrad = [W1Grad,W2Grad] ################################################################################ # TODO: Implement the loss function. 
# # This sends the weights in the direction of making actions that gave good # # advantage (reward overtime) more likely, and actions that didn't less likely.# ################################################################################ loglik = tf.log(input_y*(input_y - probability) + (1 - input_y)*(input_y + probability)) loss = -tf.reduce_mean(loglik * advantages) ################################################################################ # END OF YOUR CODE # ################################################################################ newGrads = tf.gradients(loss,tvars) updateGrads = adam.apply_gradients(zip(batchGrad,tvars)) ``` ### Model Network Here we implement a multi-layer neural network that predicts the next observation, reward, and done state from a current state and action. ``` mH = 256 # model layer size input_data = tf.placeholder(tf.float32, [None, 5]) with tf.variable_scope('rnnlm'): softmax_w = tf.get_variable("softmax_w", [mH, 50]) softmax_b = tf.get_variable("softmax_b", [50]) previous_state = tf.placeholder(tf.float32, [None,5] , name="previous_state") W1M = tf.get_variable("W1M", shape=[5, mH], initializer=tf.contrib.layers.xavier_initializer()) B1M = tf.Variable(tf.zeros([mH]),name="B1M") layer1M = tf.nn.relu(tf.matmul(previous_state,W1M) + B1M) W2M = tf.get_variable("W2M", shape=[mH, mH], initializer=tf.contrib.layers.xavier_initializer()) B2M = tf.Variable(tf.zeros([mH]),name="B2M") layer2M = tf.nn.relu(tf.matmul(layer1M,W2M) + B2M) wO = tf.get_variable("wO", shape=[mH, 4], initializer=tf.contrib.layers.xavier_initializer()) wR = tf.get_variable("wR", shape=[mH, 1], initializer=tf.contrib.layers.xavier_initializer()) wD = tf.get_variable("wD", shape=[mH, 1], initializer=tf.contrib.layers.xavier_initializer()) bO = tf.Variable(tf.zeros([4]),name="bO") bR = tf.Variable(tf.zeros([1]),name="bR") bD = tf.Variable(tf.ones([1]),name="bD") predicted_observation = tf.matmul(layer2M,wO,name="predicted_observation") + bO predicted_reward = tf.matmul(layer2M,wR,name="predicted_reward") + bR predicted_done = tf.sigmoid(tf.matmul(layer2M,wD,name="predicted_done") + bD) true_observation = tf.placeholder(tf.float32,[None,4],name="true_observation") true_reward = tf.placeholder(tf.float32,[None,1],name="true_reward") true_done = tf.placeholder(tf.float32,[None,1],name="true_done") predicted_state = tf.concat([predicted_observation,predicted_reward,predicted_done],1) observation_loss = tf.square(true_observation - predicted_observation) reward_loss = tf.square(true_reward - predicted_reward) done_loss = tf.multiply(predicted_done, true_done) + tf.multiply(1-predicted_done, 1-true_done) done_loss = -tf.log(done_loss) model_loss = tf.reduce_mean(observation_loss + done_loss + reward_loss) modelAdam = tf.train.AdamOptimizer(learning_rate=learning_rate) updateModel = modelAdam.minimize(model_loss) ``` ### Helper-functions ``` def resetGradBuffer(gradBuffer): for ix,grad in enumerate(gradBuffer): gradBuffer[ix] = grad * 0 return gradBuffer def discount_rewards(r): ################################################################################ # TODO: Implement the discounted rewards function # # Return discounted rewards weighed by gamma. Each reward will be replaced # # with a weight reward that involves itself and all the other rewards occuring # # after it. The later the reward after it happens, the less effect it has on # # the current rewards's discounted reward # # Hint: [r0, r1, r2, ..., r_N] will look someting like: # # [(r0 + r1*gamma^1 + ... 
r_N*gamma^N), (r1 + r2*gamma^1 + ...), ...] # ################################################################################ rnew = np.copy(r) for i in range(1, len(rnew)): rnew[:len(r)-i] += gamma**i * r[i:] return rnew ################################################################################ # END OF YOUR CODE # ################################################################################ # This function uses our model to produce a new state when given a previous state and action def stepModel(sess, xs, action): toFeed = np.reshape(np.hstack([xs[-1][0],np.array(action)]),[1,5]) myPredict = sess.run([predicted_state],feed_dict={previous_state: toFeed}) reward = myPredict[0][:,4] observation = myPredict[0][:,0:4] observation[:,0] = np.clip(observation[:,0],-2.4,2.4) observation[:,2] = np.clip(observation[:,2],-0.4,0.4) doneP = np.clip(myPredict[0][:,5],0,1) if doneP > 0.1 or len(xs)>= 300: done = True else: done = False return observation, reward, done ``` ## Training the Policy and Model ``` xs,drs,ys,ds = [],[],[],[] running_reward = None reward_sum = 0 episode_number = 1 real_episodes = 1 init = tf.global_variables_initializer() batch_size = real_bs drawFromModel = False # When set to True, will use model for observations trainTheModel = True # Whether to train the model trainThePolicy = False # Whether to train the policy switch_point = 1 # Launch the graph with tf.Session() as sess: rendering = False sess.run(init) observation = env.reset() x = observation gradBuffer = sess.run(tvars) gradBuffer = resetGradBuffer(gradBuffer) while episode_number <= 5000: # Start displaying environment once performance is acceptably high. if (reward_sum/batch_size > 150 and drawFromModel == False) or rendering == True : # env.render() rendering = True x = np.reshape(observation,[1,4]) tfprob = sess.run(probability,feed_dict={observations: x}) action = 1 if np.random.uniform() < tfprob else 0 # record various intermediates (needed later for backprop) xs.append(x) y = 1 if action == 0 else 0 ys.append(y) # step the model or real environment and get new measurements if drawFromModel == False: observation, reward, done, info = env.step(action) else: observation, reward, done = stepModel(sess,xs,action) reward_sum += reward ds.append(done*1) drs.append(reward) # record reward (has to be done after we call step() to get reward for previous action) if done: if drawFromModel == False: real_episodes += 1 episode_number += 1 # stack together all inputs, hidden states, action gradients, and rewards for this episode epx = np.vstack(xs) epy = np.vstack(ys) epr = np.vstack(drs) epd = np.vstack(ds) xs,drs,ys,ds = [],[],[],[] # reset array memory if trainTheModel == True: ################################################################################ # TODO: Run the model network and compute predicted_state # # Output: 'pState' # ################################################################################ feed_dict = { previous_state: np.hstack([epx[:-1], np.array([1-y for y in epy][:-1])]), true_observation: epx[1:], true_reward: epr[1:], true_done: epd[1:] } tState = np.hstack([epx[1:], epr[1:], epd[1:]]) pState, _ = sess.run([predicted_state, updateModel], feed_dict=feed_dict) ################################################################################ # END OF YOUR CODE # ################################################################################ if trainThePolicy == True: ################################################################################ # TODO: Run the policy network and 
compute newGrads # # Output: 'tGrad' # ################################################################################ # compute the discounted reward backwards through time discounted_epr = discount_rewards(epr) # size the rewards to be unit normal (helps control the gradient estimator variance) discounted_epr -= np.mean(discounted_epr) discounted_epr //= np.std(discounted_epr) tGrad = sess.run(newGrads, feed_dict={observations: epx, input_y: epy, advantages: discounted_epr}) ################################################################################ # END OF YOUR CODE # ################################################################################ # If gradients becom too large, end training process if np.sum(tGrad[0] == tGrad[0]) == 0: break for ix,grad in enumerate(tGrad): gradBuffer[ix] += grad if switch_point + batch_size == episode_number: switch_point = episode_number if trainThePolicy == True: ################################################################################ # TODO: # # (1) Run the policy network and update gradients # # (2) Reset gradBuffer to 0 # ################################################################################ sess.run(updateGrads, feed_dict={W1Grad: gradBuffer[0], W2Grad: gradBuffer[1]}) ################################################################################ # END OF YOUR CODE # ################################################################################ running_reward = reward_sum if running_reward is None else running_reward * 0.99 + reward_sum * 0.01 if drawFromModel == False: print('World Perf: Episode %f. Reward %f. action: %f. mean reward %f.' % (real_episodes,reward_sum/real_bs,action, running_reward/real_bs)) if reward_sum/batch_size >= 200: break reward_sum = 0 # Once the model has been trained on 100 episodes if episode_number > 100: ################################################################################ # TODO: Alternating between training the policy from the model and training # # the model from the real environment. # ################################################################################ drawFromModel = not drawFromModel trainTheModel = not trainTheModel trainThePolicy = not trainThePolicy ################################################################################ # END OF YOUR CODE # ################################################################################ if drawFromModel == True: observation = np.random.uniform(-0.1,0.1,[4]) # Generate reasonable starting point batch_size = model_bs else: observation = env.reset() batch_size = real_bs print(real_episodes) ``` ### Checking model representation Here we can examine how well the model is able to approximate the true environment after training. The green line indicates the real environment, and the blue indicates model predictions. ``` plt.figure(figsize=(8, 12)) for i in range(6): plt.subplot(6, 2, 2*i + 1) plt.plot(pState[:,i]) # draw the model predictions plt.subplot(6,2,2*i+1) ################################################################################ # TODO: draw the real environment for comparison # ################################################################################ plt.plot(tState[:,i]) ################################################################################ # END OF YOUR CODE # ################################################################################ plt.tight_layout() ```
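Beyond eyeballing the curves, a rough quantitative check is to compute the mean squared error of the model predictions against the real environment, per predicted quantity. This assumes `pState` and `tState` are the arrays left over from the last model-training iteration, as used in the plotting cell above.

```
# per-dimension MSE between model predictions (pState) and the real rollout (tState)
labels = ["cart position", "cart velocity", "pole angle", "pole velocity", "reward", "done"]
mse = np.mean((pState - tState) ** 2, axis=0)
for name, err in zip(labels, mse):
    print("%15s : %.4f" % (name, err))
```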
github_jupyter
<a href="https://colab.research.google.com/github/forest1988/colaboratory/blob/main/prophetnet_seq2seqtrainer.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` import torch torch.__version__ !git clone https://github.com/huggingface/transformers.git #git clone https://github.com/forest1988/transformers.git %cd transformers/ !pip install -e . %cd /content/transformers/examples/seq2seq/ # to avoid the "ModuleNotFoundError" import sys print(sys.path) sys.path.append('/content/transformers/src') print(sys.path) # If "ModuleNotFoundError" occurs in this cell, please re-run the runtime. # (The first time Colab runtime runs, it seems the sys.path is not correctly updated when we use `pip install -e .`) import transformers transformers.__version__ ``` ## dataset ``` !wget https://cdn-datasets.huggingface.co/summarization/xsum.tar.gz !tar -xzvf xsum.tar.gz # !export XSUM_DIR=${PWD}/xsum # %env XSUM_DIR=${PWD}/xsum import os os.environ['XSUM_DIR']=os.path.join(os.getcwd(), 'xsum') !echo $XSUM_DIR !ls $XSUM_DIR ``` ### Run BART-base ``` %%capture !pip install gitpython !pip install rouge_score !pip install sacrebleu !python finetune_trainer.py \ --learning_rate=3e-5 \ --do_train --do_eval --evaluate_during_training \ --predict_with_generate \ --n_train 20 \ --n_val 10 \ --model_name_or_path facebook/bart-base \ --data_dir $XSUM_DIR \ --output_dir bart-base-tmp \ --overwrite_output_dir !nvidia-smi ``` ## Run with ProphetNet (Error occurs) ``` from transformers import ProphetNetTokenizer, ProphetNetForConditionalGeneration #, ProphetNetForCausalLM # --- commeented out to avoid CUDA memory error --- # tokenizer = ProphetNetTokenizer.from_pretrained('microsoft/prophetnet-large-uncased') # model = ProphetNetForConditionalGeneration.from_pretrained('microsoft/prophetnet-large-uncased') # input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1 # decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 # outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids, return_dict=True) # logits_next_token = outputs.logits # logits to predict next token as usual # logits_ngram_next_tokens = outputs.logits_ngram # logits to predict 2nd, 3rd, ... 
next tokens # test how tokeinzer.decoder works # print(tokenizer.decode(input_ids[0])) # print(tokenizer.decode(input_ids[0], skip_special_tokens=True)) # show the style of the output # print(type(outputs)) # outputs.keys() # outputs.logits.shape, outputs.logits_ngram.shape # out_generate = model.generate(input_ids, num_beams=4, max_length=20, early_stopping=True) # out_generate.shape, out_generate[0] # tokenizer.decode(out_generate.shape[0], skip_special_tokens=True) # --- NotImplementedError occurs here --- !python finetune_trainer.py \ --learning_rate=3e-5 \ --do_train --do_eval --evaluate_during_training \ --predict_with_generate \ --n_train 100 \ --n_val 10 \ --model_name_or_path microsoft/prophetnet-large-uncased \ --data_dir $XSUM_DIR \ --output_dir tmp \ --overwrite_output_dir ``` ### Refer to T5Tokenizer ``` python @add_start_docstrings(PREPARE_SEQ2SEQ_BATCH_DOCSTRING) def prepare_seq2seq_batch( self, src_texts: List[str], tgt_texts: Optional[List[str]] = None, max_length: Optional[int] = None, max_target_length: Optional[int] = None, padding: str = "longest", return_tensors: str = None, truncation: bool = True, **kwargs, ) -> BatchEncoding: if max_length is None: max_length = self.max_len model_inputs = self( src_texts, add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, padding=padding, truncation=truncation, **kwargs, ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: max_target_length = max_length labels_and_decoder_mask = self( tgt_texts, add_special_tokens=True, return_tensors=return_tensors, padding=padding, max_length=max_target_length, truncation=truncation, **kwargs, ) model_inputs["labels"] = labels_and_decoder_mask["input_ids"] return model_inputs ``` ## Try modified version of ProphetNet Tokenizer ``` %cd /content/ !ls import shutil if os.path.exists("transformers_modified"): print("The repository is already cloned. Remove and re-clone it.") shutil.rmtree("transformers_modified") !git clone -b forest1988-prophetnet-prepare-seq2seq-batch https://github.com/forest1988/transformers.git transformers_modified %cd transformers_modified/ !pip install -e . --upgrade %cd /content/transformers_modified/examples/seq2seq/ # Use already downloaded XSUM in /content/transformers/examples/seq2seq/xsum/ ``` ### Run on GPU ``` # For GPU (if runtime is set to TPU, the code below is run on CPU.) # max_source_lenght 510 !python finetune_trainer.py \ --learning_rate=3e-5 \ --do_train --do_eval --evaluate_during_training \ --max_source_length 510 \ --per_device_train_batch_size 2 \ --predict_with_generate \ --n_train 300 \ --n_val 100 \ --model_name_or_path microsoft/prophetnet-large-uncased \ --data_dir $XSUM_DIR \ --output_dir tmp_gpu \ --overwrite_output_dir # For GPU (if runtime is set to TPU, the code below is run on CPU.) # max_source_lenght 511 !python finetune_trainer.py \ --learning_rate=3e-5 \ --do_train --do_eval --evaluate_during_training \ --max_source_length 511 \ --per_device_train_batch_size 2 \ --predict_with_generate \ --n_train 300 \ --n_val 100 \ --model_name_or_path microsoft/prophetnet-large-uncased \ --data_dir $XSUM_DIR \ --output_dir tmp_gpu \ --overwrite_output_dir # For GPU (if runtime is set to TPU, the code below is run on CPU.) 
# max_source_lenght 512 !python finetune_trainer.py \ --learning_rate=3e-5 \ --do_train --do_eval --evaluate_during_training \ --max_source_length 512 \ --per_device_train_batch_size 2 \ --predict_with_generate \ --n_train 300 \ --n_val 100 \ --model_name_or_path microsoft/prophetnet-large-uncased \ --data_dir $XSUM_DIR \ --output_dir tmp_gpu \ --overwrite_output_dir ``` ### Run on TPU ``` # install torch_xla with PyTorch !pip install cloud-tpu-client==0.10 https://storage.googleapis.com/tpu-pytorch/wheels/torch_xla-1.7-cp36-cp36m-linux_x86_64.whl # For TPU !python xla_spawn.py --num_core 1 \ finetune_trainer.py \ --learning_rate=3e-5 \ --do_train --do_eval --evaluate_during_training \ --max_source_length 200 \ --per_device_train_batch_size 32 \ --prediction_loss_only \ --model_name_or_path microsoft/prophetnet-large-uncased \ --data_dir $XSUM_DIR \ --output_dir tmp_tpu \ --overwrite_output_dir %ls tmp_tpu !ls !cat tmp_tpu/eval_results.json !cat tmp_tpu/config.json ```
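A possible follow-up, not run above: reload the fine-tuned checkpoint and generate a summary for a toy input. This assumes the trainer saved both the model and the tokenizer into the `--output_dir` (`tmp_gpu`) used earlier; the generation parameters are only illustrative.

```
from transformers import ProphetNetTokenizer, ProphetNetForConditionalGeneration

# load the fine-tuned weights saved by finetune_trainer.py (assumed to be in tmp_gpu)
tokenizer = ProphetNetTokenizer.from_pretrained("tmp_gpu")
model = ProphetNetForConditionalGeneration.from_pretrained("tmp_gpu")

article = "Studies have been shown that owning a dog is good for you."  # toy input
inputs = tokenizer(article, return_tensors="pt", truncation=True, max_length=512)
summary_ids = model.generate(inputs["input_ids"], num_beams=4, max_length=60, early_stopping=True)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```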
github_jupyter
# Classification of quantum states with high dimensional entanglement ## Circuits and computations Version compatible with 1st and 2d pilot studies ``` import numpy as np import copy from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, Aer, execute, transpile, assemble from qiskit.tools.visualization import * from qiskit.ignis.mitigation.measurement import (complete_meas_cal, tensored_meas_cal, CompleteMeasFitter, TensoredMeasFitter) import json from scipy.signal import savgol_filter import time from qiskit.tools.monitor import job_monitor from o_utils import ora # classifier utilities from o_plot import opl # utilities for result plot from c_utils import new_cut # circuit building utilities def json_dic_loader(dic_name): f = open(data_directory+dic_name+'.json') return json.load(f) ``` #markdown for safety on demo def json_dic_dumper(dic, dic_name): with open(data_directory+dic_name+'.json', 'w') as f: json.dump(dic,f) ``` # common code for calling the classifier for ideal device and for real devices def add_single_dic(target_data_list): start_time = time.time() print("started",time.strftime('%d/%m/%Y %H:%M:%S'),mitig_name, "mitigation",mit_str,o_metric,model_name) # added for D,S,M choice. Mainstream : mixed set of 20 states first = 0 last = nb_states if unique_char == "D": last = int(nb_states/2) elif unique_char == "S": first = int(nb_states/2) # get the classifier error curve in function of the number of shot and the "safe shot number" error_curve, safe_rate, ernb = ora.provide_error_curve(PD_model=model_dic[model_name][first:last,:], PD_test=PD_test[first:last,:], trials=trials, window=window, epsilon=epsilon, max_shots=max_shots, pol=pol, verbosality=verbosality) tail = savgol_filter(ernb, window, pol, axis=0) len_curve = len(error_curve) safe_shot_nb = len_curve - int((window-1)/2) # OK print('safe_shot_nb',safe_shot_nb, 'safe_rate',safe_rate, "nb trials:",trials) err_rates = tail[int((window-1)/2),:]/trials err_rate_max = np.max(err_rates) err_rate_min = np.min(err_rates) r=4 print("savgol interpolated error rate mean:", np.round(np.mean(err_rates),r), "min:", np.round(err_rate_min,r), "max:", np.round(err_rate_max,r), "for", [ien for ien, jen in enumerate(err_rates) if jen == err_rate_max]) end_time = time.time() #save the data in a list of dictionaries : single_dic={"project":mitig_name, "id_gates":id_gates, "mitigation":mit_str, "model":model_name, "metric":o_metric, "device":project_device, "curve_length":len_curve, "shots": safe_shot_nb, "shots_rate": safe_rate, "error_curve":error_curve, "trials":trials,"window":window, "epsilon":epsilon,"SG_pol": pol, "computation_time":end_time-start_time, "time_completed":time.strftime('%d/%m/%Y %H:%M:%S'), "trials":trials, "QV": QV_dic[project_device], "fidelity": fidelity_dic[project_device], "error_nb":ernb} target_data_list.append(single_dic) print("completed",time.strftime('%d/%m/%Y %H:%M:%S'),mitig_name, "mitigation",mit_str,o_metric,model_name,"\n") ``` ## Set up the simulator and layout for 5 qubits ``` simulator = Aer.get_backend('qasm_simulator') #specify the layout of the devices used_qubits = 5 qubit_list = [0,1,2,3,4] #short_version = False #program_name="QAD" # 1st pilot project GHZ Psi+ / W Phi+ program_name="AL2" # 2d pilot project W Psi+ / Wbar Phi+ Flag_char = "DS" # this for a mix of two types of separable states if len(Flag_char) >= 2: unique_char = "M" else: unique_char = Flag_char # These dictionaries for the devices used in the study if program_name == "QAD": fidelity_dic = {'ibmq_athens': 
0.925110, 'ibmq_valencia': 0.809101, 'ibmq_ourense': 0.802380, "ibmqx2": 0.627392, 'ibmq_santiago': 0.919399, 'ibmq_vigo': 0.908840, 'ideal_device': 1.0} data_directory = "data_files/" elif program_name == "AL2": fidelity_dic = {'ibmq_athens': 0.910145, 'ibmq_valencia': 0.794262, 'ibmq_ourense': 0.818974, "ibmqx2": 0.359528, 'ibmq_santiago': 0.900024, 'ibmq_vigo': 0.841831, 'ideal_device': 1.0} data_directory = "data2_files/" QV_dic = {'ibmq_athens': 32.0, 'ibmq_valencia': 16.0, 'ibmq_ourense': 8.0, "ibmqx2": 8.0, 'ibmq_santiago': 32.0, 'ibmq_vigo': 16.0, 'ideal_device': np.inf} dev_dic = {'ibmq_santiago': "San",'ibmq_athens': "Ath", 'ibmq_valencia': "Val", 'ibmq_vigo': 'Vig','ibmq_ourense': "Our", "ibmqx2": 'Yor', 'ideal_device': "Ide"} # specify the device: here first the ideal noise-free device project_device = 'ideal_device' device_name = dev_dic[project_device] # specify the nb of id gates between state creation and measurements # zero for the ideal device id_gates = 0 str_nb_id = str(id_gates) zfilled = str_nb_id.zfill(4-len(str_nb_id)) # tail of the file names for RAM storage mitig_name = program_name + "_" + device_name project_name = mitig_name + "_" + unique_char + zfilled print(mitig_name) print(project_name) # establish the result label list # meas_calibs will be used for mitigation in the real device section qr = QuantumRegister(used_qubits) meas_calibs, label_list = complete_meas_cal(qubit_list=qubit_list, qr=qr, circlabel='mcal') nb_labels=len(label_list) print(nb_labels,label_list) len(meas_calibs) # permutation list # here it is simple to write down the list, # but a version using itertools will be wellcome for >5 qubits projects if used_qubits == 5: q_perm = [[0, 1, 2, 3, 4], [0, 1, 3, 2, 4], [0, 1, 4, 2, 3], [0, 2, 3, 1, 4], [0, 2, 4, 1, 3], [0, 3, 4, 1, 2], [1, 2, 3, 0, 4], [1, 2, 4, 0, 3], [1, 3, 4, 0, 2], [2, 3, 4, 0, 1]] else: print("work in progress - meanwhile please provide the list of permutations") ``` ## Create the quantum states ``` # define the two subsets of 10 separable states if program_name == "QAD": state_1a = ["W","Phi+"] state_1b = ["GHZ","Psi+"] elif program_name == "ALT" or "AL2": state_1a = ["W","Psi+"] state_1b = ["Wbar","Phi+"] l_states = state_1a+state_1b l_states # version 20 circuits for demonstration # (in the version run on real devices: two batches of 10 circuits, "shallow" and "deep") # these circuits limited to state creation are ready to be saved # for ultimately building circuits adapted to noisy simulator and real devices # as option, these circuits will include a row of id gates between creation and measurements circ_ori = [] for i_s in range(0,len(l_states),2): for perm in q_perm: mycircuit = QuantumCircuit(used_qubits, used_qubits) mycircuit = new_cut.circuit_builder(mycircuit, perm, l_states[i_s],l_states[i_s+1]) circ_ori.append(mycircuit) # add measurement section to the circuit set newly created: nb_states = len(circ_ori) circ_ideal = copy.deepcopy(circ_ori) for i_state in range(nb_states): new_cut.add_barrier_and_measure(circ_ideal[i_state],qubit_list) ideal_dic = {} ``` ## Obtain result distributions on noise free simulator #### You may skip this section and go to: #### "Obtain the matrix of probability distribution of shape(nb_state,nb_labels) used by the classifier" ``` # execute on noise free simulator s_sim = 12000 job_simul = execute(circ_ideal, backend=simulator, shots=s_sim) tot_results_simul = job_simul.result() # establish a dictionary of count results on noise free simulator: # (this step is only useful if ram storage 
is performed) void_counts = dict(zip(label_list, np.zeros(2**used_qubits))) tot_results_sim_dic = {} for i_state in range(nb_states): counts_simul = copy.deepcopy(void_counts) counts_simul.update(tot_results_simul.get_counts(i_state)) ideal_dic[str(i_state)]=counts_simul ``` #markdown for security json_dic_dumper(ideal_dic,"ideal_dic_"+project_name) Example of circuit for separable state of the first type ($W\otimes\Phi^+\; or\; W\otimes\Psi^+$): ``` i_state_test = 10 print(device_name, "circuit #",i_state_test) circ_ideal[i_state_test].draw(output='mpl') print(device_name, "circuit #",i_state_test) plot_histogram(ideal_dic[str(i_state_test)], legend=['noise free simulation'], color = "b", figsize=(10.,5.)) ``` Example of circuit for separable state of the second type ($GHZ\otimes\Psi^+ \; or\; \bar{W}\otimes\Phi^+$): ``` i_state_test = 10 print(device_name, "circuit #",i_state_test) circ_ideal[i_state_test].draw(output='mpl') print(device_name, "circuit #",i_state_test) plot_histogram(ideal_dic[str(i_state_test)], legend=['noise free simulation'], color = "b", figsize=(10.,5.)) ``` ### Obtain the matrix of probability distribution of shape(nb_state,nb_labels) used by the classifier ``` # try loading the dictionary of results if its creation was skipped if len(ideal_dic) == 0: ideal_dic = json_dic_loader("ideal_dic_"+project_name) nb_states = len(ideal_dic) nb_labels = len(list(ideal_dic.values())[0]) s_sim = sum(list(ideal_dic.values())[0].values()) PD_ideal = np.ndarray((nb_states,nb_labels)) for i_state in range(nb_states): PD_ideal[i_state, :] = list(ideal_dic[str(i_state)].values()) # now a little trick to get the ideal values from the simulator approximated values with np.errstate(divide='ignore'): # ignore the divide by zero warning PD_ideal = 1/np.round(s_sim/(PD_ideal)) # have a look at the matrix head and tail: print("first and last state probability distributions:") print(np.round(np.vstack((PD_ideal[0:1,:],PD_ideal[-1:,:])),4)) ``` ## Monte Carlo simulation for the ideal device ``` # here will be appended the data we want for the curve plot ideal_data_list=[] ``` ### you may skip this cell and get stored curves by running the next cell ``` # you may want to skip this cell as it will require a long time # because of the high number of trials required by the Monte Carlo simulation for each nb o shots value # the following values are defined in the study summary (readme file): trials=100 # to be set to 10000 if not demo window=5 # shorter window than for the real device counts epsilon = .001 min_shots = 5 max_shots = 100 pol=2 subset = None # variable not used here verbosality = 5 # printing step for intermediate results when increasing the experiment shot number PD_test = PD_ideal mitigation_dic = {"Na": None} o_metrics_desired = ['jensenshannon', 'sqeuclidean'] model_dic = {"ideal_sim": PD_ideal} for mit_str, mitigation in mitigation_dic.items(): if mitigation != None: # thus only for counts on real device PD_exper = get_clean_matrix(empirical_dic, mitigation=mitigation, m_filter=meas_filter) for o_metric in o_metrics_desired: for model_name in model_dic.keys(): add_single_dic(ideal_data_list) ``` markdown for safety json_dic_dumper(ideal_data_list,"ideal_device_data_list_"+project_name) ``` # get the stored results of the Monte Carlo simulation in case you skipped the previous step if len(ideal_data_list) == 0: ideal_data_list = json_dic_loader("ideal_device_data_list_"+project_name) # have a look at the mean error rate curves and error rate at save shot number n_s # NB the 
r_hat_mean curves and legend reported r_hat_max errors the unsmoothed values opl.plot_curves(ideal_data_list,np.array([0,1]), "Jensen-Shannon vs squared euclidean distance - $\epsilon=0.001$" , ["model"], ["device","metric"], right_xlimit = 20, bottom_ylimit = -0.001, top_ylimit = 0.05) ``` # Real device section ``` from qiskit import IBMQ IBMQ.load_account() provider = IBMQ.get_provider(hub='ibm-q') provider.backends() project_device = 'ibmq_valencia'# you may choice here a different backend device_name = dev_dic[project_device] mitig_name = program_name + "_" + device_name print(mitig_name) #determine here the backend device = provider.get_backend(project_device) # the backend names are listed here above properties = device.properties() coupling_map = device.configuration().coupling_map ``` # obtain mitigation filter #markdown for demo nb_shots_cal = 8192 # set here the number of shots for the calibration phase print("backend:", device.name(), "qubit_list:", qubit_list) job_cal = execute(meas_calibs, backend=device, shots=nb_shots_cal) print(job_cal.job_id()) job_monitor(job_cal) time_exp = time.strftime('%d/%m/%Y %H:%M:%S') print("DMY: ",time_exp) #markdown for demo #here we save mitigation results cal_results = job_cal.result() cal_results_dic = cal_results.to_dict() #to make date in dictionary serializable if there is a 'date' key: if 'date' in cal_results_dic.keys(): cal_results_dic['date']=str(cal_results_dic['date']) #markdown for demo and security #dump json_dic_dumper(cal_results_dic,"cal_results_dic_"+ mitig_name) ``` # retrieve the corresponding measurement mitigation filter obtained at experimental time # use a fake job because use of the from_dict method simulator = Aer.get_backend('qasm_simulator') fake_job_cal = execute(meas_calibs, backend=simulator, shots=1) fake_cal_results = fake_job_cal.result() cal_results_dic = json_dic_loader("cal_results_dic_"+mitig_name) if 'date' in cal_results_dic.keys(): str(cal_results_dic['date']) cal_results = fake_cal_results.from_dict(cal_results_dic) meas_fitter = CompleteMeasFitter(cal_results, label_list, qubit_list=qubit_list, circlabel='mcal') meas_filter = meas_fitter.filter # have a look at the average measurement fidefily of this device: print("Average Measurement Fidelity was: %f" % meas_fitter.readout_fidelity(), "for",project_device) ``` ### Transpile the basic circuits for running on real device In this demo, these are not the circuits which were actually run on real devices (not the same transpiler seed). The optimization level is set to 2 instead of 3 in real experiments, for speed and also because at this moment there is a transpiler error occuring for ibmqx2: 'Maximum iteration reached. 
max_iteration=1000' ``` id_gates = 0 str_nb_id = str(id_gates) zfilled = str_nb_id.zfill(4-len(str_nb_id)) project_name = mitig_name + "_" + unique_char + zfilled print(project_name) # transpile verbose = True summary_dic = {} seed_transpiler_list = list(range(nb_states)) real_circs = [] start_time = time.strftime('%d/%m/%Y %H:%M:%S') print("Start at DMY: ",start_time) for i_state in list(range(nb_states)): # prepare circuit to be transpiled circuit = copy.deepcopy(circ_ori[i_state]) if id_gates > 0: circuit.barrier() for id_gates_index in range(id_gates): for index, value in enumerate(qubit_list): circuit.id(value) new_cut.add_barrier_and_measure(circuit, qubit_list) summary = [] depth_list = [] Q_state_opt_new = transpile(circuit, backend=device, coupling_map = coupling_map, seed_transpiler=seed_transpiler_list[i_state], optimization_level=2, initial_layout=qubit_list) summary_dic[i_state] = {"depth": Q_state_opt_new.depth(), 'circuit':Q_state_opt_new} real_circs.append(Q_state_opt_new) if verbose: print("circuit %2i" % i_state,"length",summary_dic[i_state]["depth"], "DMY: ",time.strftime('%d/%m/%Y %H:%M:%S')) end_time = time.strftime('%d/%m/%Y %H:%M:%S') print("Completed at DMY: ",end_time) i_state_test = 10 print(project_device, "circuit #",i_state_test, "circuit length:",real_circs[i_state_test].depth()) #summary_dic[i_state_test]['depth']) # you may want to skip this if large nb of id gates before measurement real_circs[i_state_test].draw(output='mpl') #check a circuit on noise-free simulator job_simul = execute(real_circs[i_state_test], backend=simulator, shots=s_sim) print(project_device, "circuit #",i_state_test, "on noise free simulator") plot_histogram(job_simul.result().get_counts(), legend=['noise free simulation'], color = "b", figsize=(10.,5.)) ``` # run job #markdown for demo #run the circuits nb_shots = 8192 print("backend:", device.name(), "qubit_list:", qubit_list) time_exp = time.strftime('%d_%m_%Y_%H_%M_%S') print("DMY: ",time_exp) job_real = execute(real_circs, backend=device, optimization_level=0, shots=nb_shots) job_real_id = job_real.job_id() print("job id:", job_real_id) job_monitor(job_real) time_exp = time.strftime('%d_%m_%Y_%H_%M_%S') print("DMY: ",time_exp, "job id:", job_real_id) tot_results_real = job_real.result() empirical_dic ={} for i_state_count, state_count in enumerate(tot_results_real.get_counts()): empirical_dic[str(i_state_count)] = state_count #markdown for safety json_dic_dumper(job_real_id,"job_real_id_"+ project_name) #markdown for safety at demo json_dic_dumper(empirical_dic,"experimental_"+ project_name) #markdown for demo #2d JOB RUN nb_shots = 8192 #run the circuits print("backend:", device.name(), "qubit_list:", qubit_list) time_exp = time.strftime('%d_%m_%Y_%H_%M_%S') print("DMY: ",time_exp) job_test = execute(real_circs, backend=device, optimization_level=0, shots=nb_shots) job_test_id = job_test.job_id() print("job id:", job_test_id) job_monitor(job_test) time_exp = time.strftime('%d_%m_%Y_%H_%M_%S') print("DMY: ",time_exp, "job id:", job_test_id) tot_results_test = job_test.result() test_dic ={} for i_state_count, state_count in enumerate(tot_results_test.get_counts()): test_dic[str(i_state_count)] = state_count #markdown for safety at demo json_dic_dumper(job_test_id,"job_test_id_"+ project_name) json_dic_dumper(test_dic,"test_"+ project_name) ### Load the transpiled circuits that were actually run ##### legacy: valid only for the GHZ Psi+ / W Phi- combination otherwise go instead to: #### "Obtain the matrix of probability 
distribution of shape(nb_state,nb_labels) used by the classifier" ``` #changing keys of dictionary for merging: def key_change(ini_dict, i_subset): ini_list = [] len_ini = len(ini_dict) for i in range(len_ini): ini_list.append(str(i+i_subset*len_ini)) return dict(zip(ini_list, list(ini_dict.values()))) if program_name == "QAD": #retrieve the data corresponding to the 1st project lfc = list(Flag_char) circ_ideal =[] empirical_dic = {} for i_subset, subset in enumerate(lfc): qasm_circs_dic = json_dic_loader('qasm_circs_dic_QAD_'+device_name+'_'+ subset + zfilled) j=0 # j included for project with several transpilation sessions for each device - not used here qasm_circs = qasm_circs_dic[str(j)] nb_circs = len(qasm_circs) for i_circs in range(nb_circs): circ_ideal.append(QuantumCircuit().from_qasm_str(qasm_circs[i_circs])) empirical_dic = {**empirical_dic, **key_change(json_dic_loader("experimental"+"_"+mitig_name +"_"\ +subset+zfilled), i_subset)} test_dic = copy.deepcopy(empirical_dic) #nb_states = len(circ_ideal) ``` ### Obtain the matrix of probability distribution of shape(nb_state,nb_labels) used by the classifier ``` if program_name == "AL2": empirical_dic = json_dic_loader('experimental_'+project_name) test_dic = json_dic_loader('test_'+project_name) def rectify_counts(tot_res, test_cqi,mitigation,m_filter) : void_counts = dict(zip(label_list, np.zeros(2**used_qubits))) try: counts_results_real_test = tot_res[str(test_cqi)] except KeyError as error: counts_results_real_test = tot_res[test_cqi] raw_counts_test = copy.deepcopy(void_counts) raw_counts_test.update(counts_results_real_test) if mitigation: mitigated_results_test = meas_filter.apply(raw_counts_test, method = 'least_squares') returned_counts = copy.deepcopy(void_counts) returned_counts.update(mitigated_results_test) else: returned_counts = copy.deepcopy(raw_counts_test) return returned_counts ``` ### Obtain the matrix of probability distribution of shape(nb_state,nb_labels) used by the classifier ``` def get_clean_matrix(dic, mitigation,m_filter): clean_matrix = np.ndarray((nb_states,nb_labels)) for i_state in range(nb_states): rectified_counts = rectify_counts(dic,i_state, mitigation,m_filter) # get a rectified counts dictionary clean_matrix[i_state, :] = list(rectified_counts.values()) clean_matrix = clean_matrix/clean_matrix.sum(axis=1, keepdims=True) return clean_matrix # We need to create a first matrix version. 
It will then vary for each considered set of distribution mitigation = False PD_exper = get_clean_matrix(empirical_dic, mitigation=mitigation, m_filter=meas_filter) print("first and last state probability distributions:") print(np.round(np.vstack((PD_exper[0:1,:],PD_exper[-1:,:])),3)) if program_name == "QAD": PD_test = copy.deepcopy(PD_exper) elif program_name == "AL2": mitigation = False PD_test = get_clean_matrix(test_dic, mitigation=mitigation, m_filter=meas_filter) print("first and last state probability distributions:") print(np.round(np.vstack((PD_test[0:1,:],PD_test[-1:,:])),3)) ``` ## Monte Carlo simulation for the real device ``` # here will be appended the data we want for the final plot of this notebook empirical_data_list=[] ``` ### you may want to skip this cell and get stored curves by running the next cell ``` # you may want to skip this cell as it will require a long time # because of the high number of trials required by the Monte Carlo simulation for each nb o shots value # the following values are defined in the study summary notebook: trials=100 # should be 1000 if not demo window=11 epsilon = .001 max_shots = 500 pol=2 verbosality = 10 # printing step for intermediate results when increasing the experiment shot number # In this section you can easily make your choice of combinations: # mitigation or not, metric, model mitigation_dic = {"no":False, "yes" : True} #mitigation_dic = {"no":False} #mitigation_dic = {"yes" : True} o_metrics_desired = ['jensenshannon', 'sqeuclidean'] #o_metrics_desired = ['jensenshannon'] #o_metrics_desired = ['sqeuclidean'] model_dic = {"empirical": PD_exper, "ideal_sim": PD_ideal} #model_dic = {"empirical": PD_exper} #model_dic = {"ideal_sim": PD_ideal} # Obtain a sequence of results in form of a list of dictionaries for mit_str, mitigation in mitigation_dic.items(): # here we toggle PD_exper as we toggled mitigation status PD_exper = get_clean_matrix(empirical_dic, mitigation=mitigation, m_filter=meas_filter) PD_test = get_clean_matrix(test_dic, mitigation=mitigation, m_filter=meas_filter) for o_metric in o_metrics_desired: print(project_name, model_dic.keys(), o_metric) for model_name in model_dic.keys(): add_single_dic(empirical_data_list) ``` markdown fo security json_dic_dumper(empirical_data_list,'Tnemp_data_list_'+project_name) ``` # get the stored results of the Monte Carlo simulation in case you skipped the previous step if len(empirical_data_list) == 0: empirical_data_list = json_dic_loader('Nemp_data_list_'+project_name) # have a look at the mean error rate curves and error rate at save shot number n_s # NB the r_hat_mean curves and legend reported r_hat_max errors are the unsmoothed values opl.plot_curves(ideal_data_list + empirical_data_list, np.array(range(2+len(empirical_data_list))), "$\epsilon=0.001$" , ["device"], ["model","metric","mitigation","id_gates"], right_xlimit = 80, bottom_ylimit = -0.02, top_ylimit = 1) import winsound duration = 2000 # milliseconds freq = 800 # Hz winsound.Beep(freq, duration) import qiskit.tools.jupyter %qiskit_version_table ```
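As an optional last step, the result dictionaries collected above can also be summarized in a table instead of curves; the keys used here follow the ones set in `add_single_dic`.

```
import pandas as pd

summary = pd.DataFrame(
    [
        {
            "device": d["device"],
            "model": d["model"],
            "metric": d["metric"],
            "mitigation": d["mitigation"],
            "safe_shots": d["shots"],
            "shots_rate": d["shots_rate"],
        }
        for d in ideal_data_list + empirical_data_list
    ]
)
summary.sort_values(["device", "metric", "model"])
```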
github_jupyter
# The Quarterly Japanese Economic Model (Q-JEM) This workbook implement the "The Quarterly Japanese Economic Model (Q-JEM): 2019 version". At http://www.boj.or.jp/en/research/wps_rev/wps_2019/wp19e07.htm/ you will find the working paper describing the model and a zipfile containing all the relevant information needed to use the model. The model logic has been transformed from Eview equation to ModelFlow Business logic and the dataseries has been transformed to a Pandas Dataframe. In this workbook the impulse responses from the working paper section 3.1.1, 3.1.2, 3.1.3, and 3.1.4 has been recreated. # Import Python libraries ``` import pandas as pd import sys sys.path.append('modelflow/') from modelsandbox import newmodel import modelnet as mn ``` # Create model and dataframe ``` with open('QJEM/model/fqjem.frm','rt') as f: fqjem =f.read() baseline = pd.read_pickle('QJEM/data/dfqjem.pk') mqjem = newmodel(fqjem) mqjem.use_preorder = 1 # make a block decomposition of the model turbo = 0 # 1 sppeds up by compiling with Numba, 0 use straight python ``` # Define some variable labels ``` legend = { 'GDP' : 'Real gross domestic product, S.A.', 'CP' : 'Real private consumption, S.A.', 'U' : 'Unemployment rate, S.A.', 'PGDP' : 'GDP deflator', 'USGDP' : 'Real gross domestic product of the United States, S.A.', 'NUSGDP': 'Output gap of the rest of the world', 'EX': 'Real exports of goods and services, S.A.', 'IM' : 'Real imports of goods and services, S.A.', 'INV' : 'Real private non-residential investment, S.A.', 'CORE_CPI' : 'Consumer price index (all items, less fresh food), S.A.' } ``` # Run the baseline ``` res = mqjem(baseline,antal=50,first_test = 1,ljit=turbo,chunk=49,silent=1) ``` # Make experiment with Foreign GDP +1 percent point. ``` instruments = [ 'V_NUSGAP','V_USGAP'] target = baseline.loc['2005q1':,['USGDP','NUSGDP']].mfcalc('''\ USGDP = USGDP*1.01 NUSGDP = NUSGDP*1.01 ''',silent=1) resalt = mqjem.control(baseline,target,instruments,silent=1) ``` # Display the results ``` disp = mqjem['GDP CP INV EX IM CORE_CPI'].difpctlevel.mul100.rename(legend).plot( colrow=2,sharey=0,title='Impact of Foreign GDP +1 percent',top=0.9) ``` # Lower Oil prices ``` instruments = [ 'V_POIL'] target = baseline.loc['2005q1':,['POIL']].mfcalc('''\ POIL = POIL*0.9 ''',silent=1) resalt = mqjem.control(baseline,target,instruments,silent=1) disp = mqjem['GDP CP INV EX IM CORE_CPI'].difpctlevel.rename(legend).plot( colrow=2,sharey=0,title='Impact of 10 percent permanent decrease in oil price',top=0.9) ``` # Combine the two previous experiments ``` instruments = [ 'V_POIL','V_NUSGAP','V_USGAP'] target = baseline.loc['2005q1':,['POIL','USGDP','NUSGDP']].mfcalc('''\ POIL = POIL*0.9 USGDP = USGDP*1.01 NUSGDP = NUSGDP*1.01 ''',silent=1) resalt = mqjem.control(baseline,target,instruments,silent=1) disp = mqjem['GDP CP INV EX IM CORE_CPI'].difpctlevel.mul100.rename(legend).plot( colrow=2,sharey=0,title='Impact of Foreign gdp GDP +1 and percent 10 percent permanent decrease in oil price',top=0.9) ``` # A permanent depreciation of exchange rates. 
```
instruments = [ 'V_FXYEN']
target = baseline.loc['2005q1':,['FXYEN']].mfcalc('''\
FXYEN = FXYEN*1.1
''',silent=1)

resalt = mqjem.control(baseline,target,instruments,silent=1)

disp = mqjem['GDP CP INV EX IM CORE_CPI'].difpctlevel.mul100.rename(legend).plot(
    colrow=2,sharey=0,title='Impact of a 10 percent permanent depreciation of the yen',top=0.9)
```

# Draw the causal structure

```
fig2 = mn.draw_adjacency_matrix(mqjem.endograph,mqjem.precoreepiorder,mqjem._superstrongblock,mqjem._superstrongtype,
                                size=(12,12))
```

# How is CPQ determined

```
mqjem.cpq.draw(up=2,down=2,HR=0,svg=1,transdic= {'ZPI*' : 'ZPI'})  # we condense all ZPI variables into one to make the chart easier to read
```

## Also with values

The result can be inspected in the graph/ subfolder in PDF format.

```
mqjem.cpq.draw(up=1,down=1,HR=0,svg=1,transdic= {'ZPI*' : 'ZPI'},last=1)  # we condense all ZPI variables into one to make the chart easier to read
```

# Another Example

```
mqjem.ex.draw(up=1,down=1)
mqjem.ex
```
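The same plotting pattern can be reused for any other endogenous variable in the model. As a sketch, assuming the last solved experiment is still the yen depreciation above, the unemployment rate and the GDP deflator (both already in the `legend` dictionary) could be displayed like this:

```
disp = mqjem['U PGDP'].difpctlevel.mul100.rename(legend).plot(
    colrow=2,sharey=0,title='Impact of a 10 percent permanent depreciation of the yen',top=0.9)
```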
github_jupyter
<a href="https://colab.research.google.com/github/madhavjk/cricket_analytics/blob/main/Impact_of_toss.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # IMPACT OF TOSS ON OUTCOME OF A MATCH <br>In this notebook, we will be analysing the impact of toss on the outcome of a match. and the steps we will be following are as follows-<br><br><br> <b> 1) Defining the context: </b> For any analysis, we will first need to develop an idea of the data that is required and also how are we going to breakdown the analysis into different categories. After finalising the structure of data to be used, the next task is to shortlist the parameters which we'll be using for performing the analysis.<br><br> <b> 2) Getting the data: </b> Now, the next task is to get the data in the required format discussed in previous step.<br><br> <b> 3) Pre-Processing the data: </b> After we get the data, the next step is to remove the extra information and clean the dataset to create a new one having the information which can be used for calculating parameters.<br><br> <b> 4) Calculating the parameters: </b> Now, we need to get the mathematical value of the parameters which we have shortlisted in the first step. So, in this step, we will use the preprocessed data to calculate the required parameters.<br><br> <b> 5) Conclusion: </b> Then using the calculated parameters, we try to find a trend from them and using this trend, we make a conclusion of our analysis. Now, let's look at each of these 5 steps one by one for our analysis on impact of toss on outcome of a match. ### Defining the context For sake of simplicity, we will only be considering the test matches, but the same analysis can be extrapolated to ODIs and T20Is as well. In this analysis, we will be loking at the impact of toss on the outcome of a match, so the data that we require should have the list of all the test matches that are played, their result, and the toss result. So, the necessary columns are team name, match result and toss result, for each test match happened. Also, for this analysis, we will be considering the recent generation's matches only, i.e. from 2016-present. This period is also selected because of the sudden increase in the of the use data from 2016. Now, since we are looking at test matches, there are 3 results possible for a match (win, lose, or draw), so the parameter that we should look for analysis should not be only focused on winning matches, but also on drawn test matches. So, for this analysis, we will consider lose percentage of a team, after they have one the toss and lost the toss. To summarize, <b>the data we will require is the scorecard data of each match, having result of the match and toss result, and the parameter we will be using for analysing the impact of toss will be the losing percentage of the team after they have won/lost the toss.</b> ### Getting the data The data required for this analysis in the above mentioned format can be found on statsguru website (by ESPNCricinfo) by applying concerned filters. The procedure to go to the required page is:<br> 1) Go to https://stats.espncricinfo.com/ci/engine/stats/index.html and click on team tab. <br> 2) Select "Home Venue" in Home or Away row and "Match Results" in View Format row. <br> 3) Give the Starting date from 01 Jan 2016 and click Submit Query.<br> 4) Go to page number 2 and copy the url. 
We have copied the url of page number 2 because when the first page is loaded, there is no mention of page number in the URL, but in the URL of page 2, there is a part as "page=2" which will be useful for us in automating the scraping for all pages in one go.<br><br> After we have the URL of the webpage, now we will create a web-scraper to scrape all the pages of the query that we gave. First we will create a scaper to scrape one page, and then we will repeat the same steps for all the pages to scrape all the pages. ``` import pandas as pd from bs4 import BeautifulSoup from urllib.request import urlopen url = 'https://stats.espncricinfo.com/ci/engine/stats/index.html?class=1;home_or_away=1;orderby=start;orderbyad=reverse;page=1;spanmin1=01+Jan+2016;spanval1=span;template=results;type=team;view=results' # URL of page number 1 ``` To open the webpage in the python notebook, we will use "urlopen" function. It will create a HTTP Response between python notebook and the webpage. ``` text = urlopen(url) # opening the webpage in python print(text) ``` Using the link that is being created, we will get the HTML of the webpage using BeautifulSoup library ``` soup = BeautifulSoup(text, "lxml") # getting the HTML of the webpage using BeautifulSoup(_HTTP_Response_, "lxml") print(soup) ``` Now, we will analyse the webpage and try to find tags and attributes which makes our desired table different from all other elements on webpage. So, after analysing the webpage, it is found that the table of our interest is different from other elements with tag as table and class as engineTable. Also amongst all the tables with class engineTable, it is the only table having caption tag in it. So now, in the HTML of the webpage, we need to find all the tables with class engineTable, and amongst all those tables, we need to find the table having a caption tag inside it. ``` table = soup.findAll('table', attrs = {'class' : 'engineTable'}) # finding all the tables with class as engineTable in the HTML of webpage int_table = 0 # defining a variable which will store the HTML of the table of our interest for temp_table in table: # running a loop over all the tables with class engineTable caption_tag = temp_table.findAll('caption') # Looking for caption tag in each table's HTML if(len(caption_tag) > 0): # checking if there is any captio tag in the table int_table = temp_table # if a caption tag is found then save this table as the table of our interest print(int_table) ``` After getting the HTML code of the required table, now we will look for all the rows inside the table and then scrape the data row-wise ``` tr_list = int_table.findAll('tr', attrs = {'class': 'data1'}) # getting all the rows in the table print(len(tr_list)) ``` As it can be seen that now we have the HTML of all the 50 rows stored in tr_list variable with each element in tr_list as HTML of one row. Now, we will get column data of each row separately and save that into a list. So, for each row, we will look for all td tags and then for each td tag, we will clear all HTML tags from the code to get only the cleantext or the data that is written in that particular cell. Then we will append this cleantext into the row data and then we will append the entire ro data to the master data, i.e. the data for all the matches. 
``` master_data = [] # list to contain all the rows in our table for tr in tr_list: td_list = tr.findAll('td') # finding all the td tags in a row row_data = [] # list to save the data of each row for td in td_list: td_str = str(td) # converting HTML of the td tag from BeautifulSoup HTML code to string datatype cleantext = BeautifulSoup(td_str, "lxml").get_text() # removing all the unecssary tags from the HTML of td to get only the text written in that tag on webpage row_data.append(cleantext) # appendng that data to row data master_data.append(row_data) # appending the entire row data to our master data (contaiing all rows) master_data ``` The above process is to get the data from 1 webpage, but in our query, we have total 5 webpages. Each of the webpage are same, only the data is different. So now we need to repeat the above process exactly for page 2, 3, 4, and 5. For doing so, we will use a for loop where our variable 'k' will vary from 1 to 5, and repeat the same lines of code for each page. The only difference in URL of these pages is "page=1" (for page 1), "page=2" (for page 2), "page=3" (for page 3) and so on till page=5. So, we will vary our k from 1 to 5 and edit the URL with page=k for each iteration, and run the above blocks of code for each page. ``` master_data = [] # list to contain all the rows in our tables of all the pages of the query for k in range(1,6): # varying value of k from 1 to 5 url = 'https://stats.espncricinfo.com/ci/engine/stats/index.html?class=1;home_or_away=1;orderby=start;orderbyad=reverse;page=' + str(k) + ';spanmin1=01+Jan+2016;spanval1=span;template=results;type=team;view=results' # URL of page number 'k' text = urlopen(url) # opening the webpage in python soup = BeautifulSoup(text, "lxml") # getting the HTML of the webpage using BeautifulSoup(_HTTP_Response_, "lxml") table = soup.findAll('table', attrs = {'class' : 'engineTable'}) # finding all the tables with class as engineTable in the HTML of webpage int_table = 0 # defining a variable which will store the HTML of the table of our interest for temp_table in table: # running a loop over all the tables with class engineTable caption_tag = temp_table.findAll('caption') # Looking for caption tag in each table's HTML if(len(caption_tag) > 0): # checking if there is any captio tag in the table int_table = temp_table # if a caption tag is found then save this table as the table of our interest tr_list = int_table.findAll('tr', attrs = {'class': 'data1'}) # getting all the rows in the table for tr in tr_list: td_list = tr.findAll('td') # finding all the td tags in a row row_data = [] # list to save the data of each row for td in td_list: td_str = str(td) # converting HTML of the td tag from BeautifulSoup HTML code to string datatype cleantext = BeautifulSoup(td_str, "lxml").get_text() # removing all the unecssary tags from the HTML of td to get only the text written in that tag on webpage row_data.append(cleantext) # appendng that data to row data master_data.append(row_data) # appending the entire row data to our master data (contaiing all rows) print(len(master_data)) ``` There were total 210 matches between 01 Jan 2016 and 25 Feb 2021, and we are also getting the same number of rows in our master_data. Hence our scraper has scraped the complete data from statsguru required for this analysis, now lets convert this master_data to a dataframe, so that we can use pandas then for working with the dataset. 
```
master_data_df = pd.DataFrame(master_data)
master_data_df

# renaming the columns
master_data_df.columns = ["Home", "Result", "Margin", "Toss", "Bat", "None1", "Opposition", "Ground", "Date", "None2"]
master_data_df

# saving the scraped data
master_data_df.to_csv('toss_data_2016_2021.csv')
```

## Pre-processing the data

Now that we have the data of all the matches from 2016 to the present, the next step is to pre-process the data so as to make it easier to calculate our parameter of interest, which is the losing percentage. To calculate the losing percentage of each team, we need a column in our dataset which shows whether the team that won the toss lost the match (0: if the team winning the toss does not lose the match, 1: if the team winning the toss loses the match). To create this column, we will need two columns: one is the name of the team that won the toss, and the second is the name of the team that lost the match (in case of a draw, the column value will be draw). So let us now create these 2 columns first.

Before we create these columns, let us first remove the unnecessary columns from the table. Columns "None1" and "None2" are empty, so we need to drop both of them. Also, in the Opposition column, "v " is written before the name of the opposition team, so we need to remove that "v " from the Opposition column.

```
# dropping the columns
master_data_df = master_data_df.drop(['None1', 'None2'], axis = 1) # .drop(list_of_columns_to_be_dropped)
# axis=0, if we have to remove rows and axis=1, if we have to remove columns
master_data_df

# removing "v " from the Opposition column
master_data_df["Opposition"] = master_data_df["Opposition"].str.replace("v ", "") # we have replaced "v " with an empty string so that we now have only the name of the team in the Opposition column
master_data_df
```

Now, let us create the two columns. The first column will be the name of the team that won the toss, because in our table we only have won and lost in the Toss column; it does not have the name of the team that won the toss. To create this column, we will run a for loop over the entire dataframe, and for each row, if the Toss is won, then the team that won the toss is the Home team, else it is the opposition team.

```
toss_team = [] # a list to contain each row's value of the new column

for index, row in master_data_df.iterrows(): # for loop on the dataframe
    if(row["Toss"] == "won"): # if the toss value is won, then the team who won the toss is the Home team
        toss_team.append(row["Home"])
    else: # if the toss value is lost, then the team who won the toss is the opposition
        toss_team.append(row["Opposition"])

toss_team

# Adding this column to the dataframe
master_data_df["toss_team"] = toss_team
master_data_df
```

Now we will add the next column as the name of the team that lost the match (draw in case of a draw). To add this column, we will again look at each row separately. For each row, if the Result is won, then the team that lost the match is the opposition; if the Result is lost, then the team that lost the match is the Home team; and if the Result is draw, then the match was drawn.
```
team_lost = [] # a list to contain the name of the team who lost the match for each row

for index, row in master_data_df.iterrows(): # for loop over the entire dataframe
    if(row["Result"] == "won"): # if the Result is won, then the team that lost the match is the opposition
        team_lost.append(row["Opposition"])
    elif(row["Result"] == "lost"): # if the Result is lost, then the team that lost the match is Home
        team_lost.append(row["Home"])
    else: # if the Result is draw, then the match is a draw
        team_lost.append("draw")

team_lost

# Adding this column to the dataframe
master_data_df["team_lost"] = team_lost
master_data_df
```

Now we will add the final column, i.e. whether the team that won the toss lost the match or not.

- 0: if the team that won the toss didn't lose the match
- 1: if the team that won the toss lost the match

```
won_toss_lost_match = [] # defining a list that will contain the 0/1 values for each row in the dataframe

for index, row in master_data_df.iterrows(): # for loop over the entire dataframe
    if(row["toss_team"] == row["team_lost"]): # if the team that won the toss has also lost the match, then the value will be 1
        won_toss_lost_match.append(1)
    else: # if the team that won the toss has not lost the match, then the value will be 0
        won_toss_lost_match.append(0)

won_toss_lost_match

# Adding the column to the dataframe
master_data_df["toss_data"] = won_toss_lost_match
master_data_df
```

## Calculating the parameters

Now that we have the data in the desired format, we need to calculate the losing percentage for each team after they have won the toss. To do so, first we need to find all the unique teams in our dataset. After that, for each team, we count the total number of instances where the team has won the toss, and then the total number of instances where the value of "toss_data" is 1 for that team. Using these 2 values we get the losing percentage of the team: (total instances of "toss_data = 1") / (total instances where the team has won the toss).

Let us now first find the unique teams in our dataset:

```
teams = master_data_df["toss_team"].unique().tolist() # first we use the unique() function to find all the unique values in
# the "toss_team" column and then convert that output array into a list using the tolist() function because we are more
# comfortable working with lists.
teams
```

After finding the unique list of teams, we will look at each team separately and calculate the losing percentage of each team. For each team, first we will filter the dataset to contain only the rows where toss_team is the team we are considering, and then we will find the total number of occurrences of 1 in the toss_data column in this filtered dataframe. After that, we will divide this number by the total number of rows for that team to calculate the losing percentage.
```
loss_percentage = [] # loss percentage list for each team

for team in teams: # running a for loop on the teams list, taking one team per iteration
    filtered_df = master_data_df[master_data_df["toss_team"] == team] # filtering the data where toss_team is equal to the name of the concerned team
    num_of_matches = len(filtered_df) # calculating the total number of matches where the team has won the toss
    lost_df = filtered_df[filtered_df["toss_data"] == 1] # filtering only the rows where the team has lost the match after winning the toss
    lost_count = len(lost_df) # calculating the number of times the team has lost the match after winning the toss
    loss_percent = lost_count/num_of_matches*100 # calculating the loss percentage
    temp = [] # creating an empty list to store the losing percentage and team name
    temp.append(team) # adding the team name to the temp list
    temp.append(loss_percent) # adding loss_percent to the temp list
    loss_percentage.append(temp) # adding the temp list to the main loss percentage list

loss_percentage
```

## Conclusion

We will not be considering Afghanistan and Ireland, as the sample size for them is too small. For India, Australia, New Zealand and South Africa, the losing percentage after winning the toss is at most 20%, which indicates that these teams use the toss to their benefit and that the toss plays an important role in the outcome of the match. For India it is only 5%, which indicates that team India knows very well how to use the toss to its benefit. Teams like England, Bangladesh, West Indies and Pakistan are yet to figure out how to use the toss to their benefit, as their losing percentages are high even after winning the toss. So, to conclude, it can be said that in test matches the toss plays an important role for the top teams, as they have lower losing percentages, whereas the other teams are yet to take advantage of the toss in tests.
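As an optional cross-check on the loop above, the same losing percentages can also be computed more compactly with a pandas groupby. This is only a sketch and assumes the master_data_df built earlier, with its toss_team and toss_data columns; the output column names are arbitrary.

```
# Count toss wins per team and sum the 0/1 toss_data flags to get losses after winning the toss
loss_percentage_df = (
    master_data_df.groupby("toss_team")["toss_data"]
    .agg(matches_won_toss="count", losses_after_toss="sum")
)

# losing percentage = losses after winning the toss / matches where the toss was won
loss_percentage_df["loss_percent"] = (
    loss_percentage_df["losses_after_toss"] / loss_percentage_df["matches_won_toss"] * 100
)

loss_percentage_df.sort_values("loss_percent")
```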
github_jupyter
# Temporal Difference: On-policy n-Tuple Expected Sarsa, Stochastic ``` import numpy as np ``` ## Create environment ``` def create_environment_states(): """Creates environment states. Returns: num_states: int, number of states. num_terminal_states: int, number of terminal states. num_non_terminal_states: int, number of non terminal states. """ num_states = 16 num_terminal_states = 2 num_non_terminal_states = num_states - num_terminal_states return num_states, num_terminal_states, num_non_terminal_states def create_environment_actions(num_non_terminal_states): """Creates environment actions. Args: num_non_terminal_states: int, number of non terminal states. Returns: max_num_actions: int, max number of actions possible. num_actions_per_non_terminal_state: array[int], number of actions per non terminal state. """ max_num_actions = 4 num_actions_per_non_terminal_state = np.repeat( a=max_num_actions, repeats=num_non_terminal_states) return max_num_actions, num_actions_per_non_terminal_state def create_environment_successor_counts(num_states, max_num_actions): """Creates environment successor counts. Args: num_states: int, number of states. max_num_actions: int, max number of actions possible. Returns: num_state_action_successor_states: array[int], number of successor states s' that can be reached from state s by taking action a. """ num_state_action_successor_states = np.repeat( a=1, repeats=num_states * max_num_actions) num_state_action_successor_states = np.reshape( a=num_state_action_successor_states, newshape=(num_states, max_num_actions)) return num_state_action_successor_states def create_environment_successor_arrays( num_non_terminal_states, max_num_actions): """Creates environment successor arrays. Args: num_non_terminal_states: int, number of non terminal states. max_num_actions: int, max number of actions possible. Returns: sp_idx: array[int], state indices of new state s' of taking action a from state s. p: array[float], transition probability to go from state s to s' by taking action a. r: array[float], reward from new state s' from state s by taking action a. """ sp_idx = np.array( object=[1, 0, 14, 4, 2, 1, 0, 5, 2, 2, 1, 6, 4, 14, 3, 7, 5, 0, 3, 8, 6, 1, 4, 9, 6, 2, 5, 10, 8, 3, 7, 11, 9, 4, 7, 12, 10, 5, 8, 13, 10, 6, 9, 15, 12, 7, 11, 11, 13, 8, 11, 12, 15, 9, 12, 13], dtype=np.int64) p = np.repeat( a=1.0, repeats=num_non_terminal_states * max_num_actions * 1) r = np.repeat( a=-1.0, repeats=num_non_terminal_states * max_num_actions * 1) sp_idx = np.reshape( a=sp_idx, newshape=(num_non_terminal_states, max_num_actions, 1)) p = np.reshape( a=p, newshape=(num_non_terminal_states, max_num_actions, 1)) r = np.reshape( a=r, newshape=(num_non_terminal_states, max_num_actions, 1)) return sp_idx, p, r def create_environment(): """Creates environment. Returns: num_states: int, number of states. num_terminal_states: int, number of terminal states. num_non_terminal_states: int, number of non terminal states. max_num_actions: int, max number of actions possible. num_actions_per_non_terminal_state: array[int], number of actions per non terminal state. num_state_action_successor_states: array[int], number of successor states s' that can be reached from state s by taking action a. sp_idx: array[int], state indices of new state s' of taking action a from state s. p: array[float], transition probability to go from state s to s' by taking action a. r: array[float], reward from new state s' from state s by taking action a. 
""" (num_states, num_terminal_states, num_non_terminal_states) = create_environment_states() (max_num_actions, num_actions_per_non_terminal_state) = create_environment_actions( num_non_terminal_states) num_state_action_successor_states = create_environment_successor_counts( num_states, max_num_actions) (sp_idx, p, r) = create_environment_successor_arrays( num_non_terminal_states, max_num_actions) return (num_states, num_terminal_states, num_non_terminal_states, max_num_actions, num_actions_per_non_terminal_state, num_state_action_successor_states, sp_idx, p, r) ``` ## Set hyperparameters ``` def set_hyperparameters(): """Sets hyperparameters. Returns: num_episodes: int, number of episodes to train over. maximum_episode_length: int, max number of timesteps for an episode. num_qs: int, number of state-action-value functions Q_i(s, a). alpha: float, alpha > 0, learning rate. epsilon: float, 0 <= epsilon <= 1, exploitation-exploration trade-off, higher means more exploration. gamma: float, 0 <= gamma <= 1, amount to discount future reward. """ num_episodes = 10000 maximum_episode_length = 200 num_qs = 3 alpha = 0.1 epsilon = 0.1 gamma = 1.0 return num_episodes, maximum_episode_length, num_qs, alpha, epsilon, gamma ``` ## Create value function and policy arrays ``` def create_value_function_arrays(num_qs, num_states, max_num_actions): """Creates value function arrays. Args: num_qs: int, number of state-action-value functions Q_i(s, a). num_states: int, number of states. max_num_actions: int, max number of actions possible. Returns: q: array[float], keeps track of the estimated value of each state-action pair Q_i(s, a). """ q = np.repeat(a=0.0, repeats=num_qs * num_states * max_num_actions) q = np.reshape(a=q, newshape=(num_qs, num_states, max_num_actions)) return q def create_policy_arrays(num_non_terminal_states, max_num_actions): """Creates policy arrays. Args: num_non_terminal_states: int, number of non terminal states. max_num_actions: int, max number of actions possible. Returns: policy: array[float], learned stochastic policy of which action a to take in state s. """ policy = np.repeat( a=1.0 / max_num_actions, repeats=num_non_terminal_states * max_num_actions) policy = np.reshape( a=policy, newshape=(num_non_terminal_states, max_num_actions)) return policy ``` ## Create algorithm ``` # Set random seed so that everything is reproducible np.random.seed(seed=0) def initialize_epsiode(num_non_terminal_states): """Initializes epsiode with initial state. Args: num_non_terminal_states: int, number of non terminal states. Returns: init_s_idx: int, initial state index from set of non terminal states. """ # Randomly choose an initial state from all non-terminal states init_s_idx = np.random.randint( low=0, high=num_non_terminal_states, dtype=np.int64) return init_s_idx def epsilon_greedy_policy_from_state_action_function( max_num_actions, q, epsilon, s_idx, policy): """Create epsilon-greedy policy from state-action value function. Args: max_num_actions: int, max number of actions possible. q: array[float], keeps track of the estimated value of each state-action pair Q_i(s, a). epsilon: float, 0 <= epsilon <= 1, exploitation-exploration trade-off, higher means more exploration. s_idx: int, current state index. policy: array[float], learned stochastic policy of which action a to take in state s. Returns: policy: array[float], learned stochastic policy of which action a to take in state s. 
""" # Combine state-action value functions q = np.sum(a=q[:, s_idx, :], axis=0) # Save max state-action value and find the number of actions that have the # same max state-action value max_action_value = np.max(a=q) max_action_count = np.count_nonzero(a=q == max_action_value) # Apportion policy probability across ties equally for state-action pairs # that have the same value and zero otherwise if max_action_count == max_num_actions: max_policy_prob_per_action = 1.0 / max_action_count remain_prob_per_action = 0.0 else: max_policy_prob_per_action = (1.0 - epsilon) / max_action_count remain_prob_per_action = epsilon / (max_num_actions - max_action_count) policy[s_idx, :] = np.where( q == max_action_value, max_policy_prob_per_action, remain_prob_per_action) return policy def loop_through_episode( num_non_terminal_states, max_num_actions, num_state_action_successor_states, sp_idx, p, r, num_qs, q, policy, alpha, epsilon, gamma, maximum_episode_length, s_idx): """Loops through episode to iteratively update policy. Args: num_non_terminal_states: int, number of non terminal states. max_num_actions: int, max number of actions possible. num_state_action_successor_states: array[int], number of successor states s' that can be reached from state s by taking action a. sp_idx: array[int], state indices of new state s' of taking action a from state s. p: array[float], transition probability to go from state s to s' by taking action a. r: array[float], reward from new state s' from state s by taking action a. num_qs: int, number of state-action-value functions Q_i(s, a). q: array[float], keeps track of the estimated value of each state-action pair Q_i(s, a). policy: array[float], learned stochastic policy of which action a to take in state s. alpha: float, alpha > 0, learning rate. epsilon: float, 0 <= epsilon <= 1, exploitation-exploration trade-off, higher means more exploration. gamma: float, 0 <= gamma <= 1, amount to discount future reward. maximum_episode_length: int, max number of timesteps for an episode. s_idx: int, current state index. Returns: q: array[float], keeps track of the estimated value of each state-action pair Q_i(s, a). policy: array[float], learned stochastic policy of which action a to take in state s. 
""" # Loop through episode steps until termination for t in range(0, maximum_episode_length): # Choose policy for chosen state by epsilon-greedy choosing from the # state-action-value function policy = epsilon_greedy_policy_from_state_action_function( max_num_actions, q, epsilon, s_idx, policy) # Get epsilon-greedy action a_idx = np.random.choice( a=max_num_actions, p=policy[s_idx, :]) # Get reward successor_state_transition_idx = np.random.choice( a=num_state_action_successor_states[s_idx, a_idx], p=p[s_idx, a_idx, :]) reward = r[s_idx, a_idx, successor_state_transition_idx] # Get next state next_s_idx = sp_idx[s_idx, a_idx, successor_state_transition_idx] # Update state action value equally randomly selecting from the # state-action-value functions updating_q_idx = np.random.randint(low=0, high=num_qs, dtype=np.int64) q, policy, s_idx = update_q( num_non_terminal_states, max_num_actions, policy, alpha, epsilon, gamma, s_idx, a_idx, reward, next_s_idx, updating_q_idx, num_qs, q) if next_s_idx >= num_non_terminal_states: break # episode terminated since we ended up in a terminal state return q, policy def update_q( num_non_terminal_states, max_num_actions, policy, alpha, epsilon, gamma, s_idx, a_idx, reward, next_s_idx, updating_q_idx, num_qs, q): """Updates state-action-value function using multiple estimates. Args: num_non_terminal_states: int, number of non terminal states. max_num_actions: int, max number of actions possible. policy: array[float], learned stochastic policy of which action a to take in state s. alpha: float, alpha > 0, learning rate. epsilon: float, 0 <= epsilon <= 1, exploitation-exploration trade-off, higher means more exploration. gamma: float, 0 <= gamma <= 1, amount to discount future reward. s_idx: int, current state index. a_idx: int, current action index. reward: float, current reward from taking action a_idx in state s_idx. next_s_idx: int, next state index. updating_q_idx: int, index to which Q_i(s, a) we'll be updating. num_qs: int, number of state-action-value functions Q_i(s, a). q: array[float], keeps track of the estimated value of each state-action pair Q_i(s, a). Returns: q: array[float], keeps track of the estimated value of each state-action pair Q_i(s, a). policy: array[float], learned stochastic policy of which action a to take in state s. s_idx: int, new current state index. """ # Check to see if we actioned into a terminal state if next_s_idx >= num_non_terminal_states: delta = reward - q[updating_q_idx, s_idx, a_idx] q[updating_q_idx, s_idx, a_idx] += alpha * delta else: # Get next action, using expectation value q_indices = np.arange(num_qs) not_updating_q_idx = np.random.choice( a=np.extract(condition=q_indices != updating_q_idx, arr=q_indices)) not_updating_v_expected_value_on_policy = np.sum( a=policy[next_s_idx, :] * q[not_updating_q_idx, next_s_idx, :]) # Calculate state-action-function expectation delta = gamma * not_updating_v_expected_value_on_policy delta -= q[updating_q_idx, s_idx, a_idx] q[updating_q_idx, s_idx, a_idx] += alpha * (reward + delta) # Update state and action to next state and action s_idx = next_s_idx return q, policy, s_idx def on_policy_temporal_difference_n_tuple_expected_sarsa( num_non_terminal_states, max_num_actions, num_state_action_successor_states, sp_idx, p, r, num_qs, q, policy, alpha, epsilon, gamma, maximum_episode_length, num_episodes): """Loops through episodes to iteratively update policy. Args: num_non_terminal_states: int, number of non terminal states. 
max_num_actions: int, max number of actions possible. num_state_action_successor_states: array[int], number of successor states s' that can be reached from state s by taking action a. sp_idx: array[int], state indices of new state s' of taking action a from state s. p: array[float], transition probability to go from state s to s' by taking action a. r: array[float], reward from new state s' from state s by taking action a. num_qs: int, number of state-action-value functions Q_i(s, a). q: array[float], keeps track of the estimated value of each state-action pair Q_i(s, a). policy: array[float], learned stochastic policy of which action a to take in state s. alpha: float, alpha > 0, learning rate. epsilon: float, 0 <= epsilon <= 1, exploitation-exploration trade-off, higher means more exploration. gamma: float, 0 <= gamma <= 1, amount to discount future reward. maximum_episode_length: int, max number of timesteps for an episode. num_episodes: int, number of episodes to train over. Returns: q: array[float], keeps track of the estimated value of each state-action pair Q_i(s, a). policy: array[float], learned stochastic policy of which action a to take in state s. """ for episode in range(0, num_episodes): # Initialize episode to get initial state init_s_idx = initialize_epsiode(num_non_terminal_states) # Loop through episode and update the policy q, policy = loop_through_episode( num_non_terminal_states, max_num_actions, num_state_action_successor_states, sp_idx, p, r, num_qs, q, policy, alpha, epsilon, gamma, maximum_episode_length, init_s_idx) return q, policy ``` ## Run algorithm ``` def run_algorithm(): """Runs the algorithm.""" (num_states, num_terminal_states, num_non_terminal_states, max_num_actions, num_actions_per_non_terminal_state, num_state_action_successor_states, sp_idx, p, r) = create_environment() (num_episodes, maximum_episode_length, num_qs, alpha, epsilon, gamma) = set_hyperparameters() q = create_value_function_arrays(num_qs, num_states, max_num_actions) policy = create_policy_arrays(num_non_terminal_states, max_num_actions) # Print initial arrays print("\nInitial state-action value function") print(q) print("\nInitial policy") print(policy) # Run on policy temporal difference n-tuple expected sarsa q, policy = on_policy_temporal_difference_n_tuple_expected_sarsa( num_non_terminal_states, max_num_actions, num_state_action_successor_states, sp_idx, p, r, num_qs, q, policy, alpha, epsilon, gamma, maximum_episode_length, num_episodes) # Print final results print("\nFinal state-action value function") print(q) print("\nFinal policy") print(policy) run_algorithm() ```
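To make the expected-value target used in the non-terminal branch of `update_q` concrete, here is a small self-contained sketch with made-up numbers. The arrays below are illustrative only and are not taken from the gridworld environment above; the point is just to show one Q-table being updated towards the on-policy expectation computed under another Q-table.

```
import numpy as np

# Illustrative only: 2 Q-tables, 3 states, 2 actions
q_demo = np.array([[[0.0, 1.0], [2.0, 0.5], [0.0, 0.0]],
                   [[0.5, 0.0], [1.0, 1.5], [0.0, 0.0]]])
policy_demo = np.array([[0.9, 0.1], [0.5, 0.5], [1.0, 0.0]])
alpha, gamma, reward = 0.1, 1.0, -1.0
s, a, next_s = 0, 1, 1

# Expected value of the next state under the policy, using the Q-table that is NOT being updated
expected_next_value = np.sum(policy_demo[next_s, :] * q_demo[1, next_s, :])

# TD target and update of the selected Q-table, mirroring update_q above
td_target = reward + gamma * expected_next_value
q_demo[0, s, a] += alpha * (td_target - q_demo[0, s, a])
print(q_demo[0, s, a])
```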
github_jupyter
This is an example showing the prediction latency of various scikit-learn estimators. The goal is to measure the latency one can expect when doing predictions either in bulk or atomic (i.e. one by one) mode. The plots represent the distribution of the prediction latency as a boxplot. #### New to Plotly? Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/). <br>You can set up Plotly to work in [online](https://plot.ly/python/getting-started/#initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/#start-plotting-online). <br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started! ### Version ``` import sklearn sklearn.__version__ ``` ### Imports This tutorial imports [StandardScaler](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html#sklearn.preprocessing.StandardScaler), [train_test_split](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html#sklearn.model_selection.train_test_split), [scoreatpercentile](http://docs.scipy.org/doc/scipy-0.11.0/reference/generated/scipy.stats.scoreatpercentile.html#scipy.stats.scoreatpercentile), [make_regression](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_regression.html#sklearn.datasets.make_regression), [RandomForestRegressor](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html#sklearn.ensemble.RandomForestRegressor), [Ridge](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html#sklearn.linear_model.Ridge), [SGDRegressor](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDRegressor.html#sklearn.linear_model.SGDRegressor), [SVR](http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVR.html#sklearn.svm.SVR) and [shuffle](http://scikit-learn.org/stable/modules/generated/sklearn.utils.shuffle.html#sklearn.utils.shuffle). 
``` from __future__ import print_function from collections import defaultdict from plotly import tools import plotly.plotly as py import plotly.graph_objs as go import time import gc import numpy as np import matplotlib.pyplot as plt from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split from scipy.stats import scoreatpercentile from sklearn.datasets.samples_generator import make_regression from sklearn.ensemble.forest import RandomForestRegressor from sklearn.linear_model.ridge import Ridge from sklearn.linear_model.stochastic_gradient import SGDRegressor from sklearn.svm.classes import SVR from sklearn.utils import shuffle ``` ### Calculations ``` fig1 = tools.make_subplots(rows=4, cols=1, subplot_titles=( 'Prediction Time per instance - Atomic, 100 feats', 'Prediction Time per instance - Bulk(100), 100 feats', 'Evolution of Prediction Time with #Features ', 'Prediction Throughput for different estimators (%d ' 'features)' % configuration['n_features'])) def _not_in_sphinx(): # Hack to detect whether we are running by the sphinx builder return '__file__' in globals() def atomic_benchmark_estimator(estimator, X_test, verbose=False): """Measure runtime prediction of each instance.""" n_instances = X_test.shape[0] runtimes = np.zeros(n_instances, dtype=np.float) for i in range(n_instances): instance = X_test[[i], :] start = time.time() estimator.predict(instance) runtimes[i] = time.time() - start if verbose: print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile( runtimes, 50), max(runtimes)) return runtimes def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose): """Measure runtime prediction of the whole input.""" n_instances = X_test.shape[0] runtimes = np.zeros(n_bulk_repeats, dtype=np.float) for i in range(n_bulk_repeats): start = time.time() estimator.predict(X_test) runtimes[i] = time.time() - start runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes))) if verbose: print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile( runtimes, 50), max(runtimes)) return runtimes def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False): """ Measure runtimes of prediction in both atomic and bulk mode. Parameters ---------- estimator : already trained estimator supporting `predict()` X_test : test input n_bulk_repeats : how many times to repeat when evaluating bulk mode Returns ------- atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the runtimes in seconds. 
""" atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose) bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose) return atomic_runtimes, bulk_runtimes def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False): """Generate a regression dataset with the given parameters.""" if verbose: print("generating dataset...") X, y, coef = make_regression(n_samples=n_train + n_test, n_features=n_features, noise=noise, coef=True) random_seed = 13 X_train, X_test, y_train, y_test = train_test_split( X, y, train_size=n_train, random_state=random_seed) X_train, y_train = shuffle(X_train, y_train, random_state=random_seed) X_scaler = StandardScaler() X_train = X_scaler.fit_transform(X_train) X_test = X_scaler.transform(X_test) y_scaler = StandardScaler() y_train = y_scaler.fit_transform(y_train[:, None])[:, 0] y_test = y_scaler.transform(y_test[:, None])[:, 0] gc.collect() if verbose: print("ok") return X_train, y_train, X_test, y_test def benchmark(configuration): """Run the whole benchmark.""" X_train, y_train, X_test, y_test = generate_dataset( configuration['n_train'], configuration['n_test'], configuration['n_features']) stats = {} for estimator_conf in configuration['estimators']: print("Benchmarking", estimator_conf['instance']) estimator_conf['instance'].fit(X_train, y_train) gc.collect() a, b = benchmark_estimator(estimator_conf['instance'], X_test) stats[estimator_conf['name']] = {'atomic': a, 'bulk': b} cls_names = [estimator_conf['name'] for estimator_conf in configuration[ 'estimators']] runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names] boxplot_runtimes(runtimes, 'atomic', configuration, 1) runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names] boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'], configuration, 2) def n_feature_influence(estimators, n_train, n_test, n_features, percentile): """ Estimate influence of the number of features on prediction time. 
Parameters ---------- estimators : dict of (name (str), estimator) to benchmark n_train : nber of training instances (int) n_test : nber of testing instances (int) n_features : list of feature-space dimensionality to test (int) percentile : percentile at which to measure the speed (int [0-100]) Returns: -------- percentiles : dict(estimator_name, dict(n_features, percentile_perf_in_us)) """ percentiles = defaultdict(defaultdict) for n in n_features: print("benchmarking with %d features" % n) X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n) for cls_name, estimator in estimators.items(): estimator.fit(X_train, y_train) gc.collect() runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False) percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes, percentile) return percentiles def benchmark_throughputs(configuration, duration_secs=0.1): """benchmark throughput for different estimators.""" X_train, y_train, X_test, y_test = generate_dataset( configuration['n_train'], configuration['n_test'], configuration['n_features']) throughputs = dict() for estimator_config in configuration['estimators']: estimator_config['instance'].fit(X_train, y_train) start_time = time.time() n_predictions = 0 while (time.time() - start_time) < duration_secs: estimator_config['instance'].predict(X_test[[0]]) n_predictions += 1 throughputs[estimator_config['name']] = n_predictions / duration_secs return throughputs ``` ### Plot Results Boxplot Runtimes ``` def boxplot_runtimes(runtimes, pred_type, configuration, subplot): """ Plot a new `Figure` with boxplots of prediction runtimes. Parameters ---------- runtimes : list of `np.array` of latencies in micro-seconds cls_names : list of estimator class names that generated the runtimes pred_type : 'bulk' or 'atomic' """ cls_infos = ['%s<br>(%d %s)' % (estimator_conf['name'], estimator_conf['complexity_computer']( estimator_conf['instance']), estimator_conf['complexity_label']) for estimator_conf in configuration['estimators']] box_plot1 = go.Box(y=runtimes[0],showlegend=False,name=cls_infos[0], fillcolor='rgba(0.4,225, 128, 128)', line=dict(color="black", width=1)) box_plot2 = go.Box(y=runtimes[1],showlegend=False,name=cls_infos[1], fillcolor='rgba(0.4,225, 128, 128)', line=dict(color="black", width=1)) box_plot3 = go.Box(y=runtimes[2],showlegend=False,name=cls_infos[2], fillcolor='rgba(0.4,225, 128, 128)', line=dict(color="black", width=1)) fig1.append_trace(box_plot1, subplot, 1) fig1.append_trace(box_plot2, subplot, 1) fig1.append_trace(box_plot3, subplot, 1) axis='yaxis'+str(subplot) fig1['layout'][axis].update(title='Prediction Time (us)') axis='xaxis'+str(subplot) fig1['layout'][axis].update(ticks='Prediction Time (us)') ``` Plot n_features influence. 
``` def plot_n_features_influence(percentiles, percentile): for i, cls_name in enumerate(percentiles.keys()): x = np.array(sorted([n for n in percentiles[cls_name].keys()])) y = np.array([percentiles[cls_name][n] for n in x]) line_plot = go.Scatter(x=x, y=y, showlegend=False, mode='lines', line=dict(color="red")) fig1.append_trace(line_plot, 3, 1) fig1['layout']['xaxis3'].update(title='#Features') fig1['layout']['yaxis3'].update(title='Prediction Time at %d%%-ile (us)' % percentile) def plot_benchmark_throughput(throughputs, configuration): fig, ax = plt.subplots(figsize=(10, 6)) cls_infos = ['%s<br>(%d %s)' % (estimator_conf['name'], estimator_conf['complexity_computer']( estimator_conf['instance']), estimator_conf['complexity_label']) for estimator_conf in configuration['estimators']] cls_values = [throughputs[estimator_conf['name']] for estimator_conf in configuration['estimators']] bar_plot = go.Bar(x=cls_infos, y= cls_values, showlegend=False, marker=dict( color=['red', 'green', 'blue'])) fig1.append_trace(bar_plot, 4, 1) fig1['layout']['yaxis4'].update(title='Throughput (predictions/sec)') ``` Plot data ``` start_time = time.time() # benchmark bulk/atomic prediction speed for various regressors configuration = { 'n_train': int(1e3), 'n_test': int(1e2), 'n_features': int(1e2), 'estimators': [ {'name': 'Linear Model', 'instance': SGDRegressor(penalty='elasticnet', alpha=0.01, l1_ratio=0.25, fit_intercept=True), 'complexity_label': 'non-zero coefficients', 'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)}, {'name': 'RandomForest', 'instance': RandomForestRegressor(), 'complexity_label': 'estimators', 'complexity_computer': lambda clf: clf.n_estimators}, {'name': 'SVR', 'instance': SVR(kernel='rbf'), 'complexity_label': 'support vectors', 'complexity_computer': lambda clf: len(clf.support_vectors_)}, ] } benchmark(configuration) # benchmark n_features influence on prediction speed percentile = 90 percentiles = n_feature_influence({'ridge': Ridge()}, configuration['n_train'], configuration['n_test'], [100, 250, 500], percentile) plot_n_features_influence(percentiles, percentile) # benchmark throughput throughputs = benchmark_throughputs(configuration) plot_benchmark_throughput(throughputs, configuration) stop_time = time.time() print("example run in %.2fs" % (stop_time - start_time)) fig1['layout'].update(height=2000) py.iplot(fig1) ``` ### License Authors: Eustache Diemert <[email protected]> License: BSD 3 clause ``` from IPython.display import display, HTML display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />')) display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">')) ! pip install git+https://github.com/plotly/publisher.git --upgrade import publisher publisher.publish( 'Prediction-Latency.ipynb', 'scikit-learn/plot-prediction-latency/', 'Prediction Latency | plotly', ' ', title = 'Prediction Latency | plotly', name = 'Prediction Latency', has_thumbnail='true', thumbnail='thumbnail/prediction-latency.jpg', language='scikit-learn', page_type='example_index', display_as='real_dataset', order=9,ipynb='~Diksha_Gabha/2674') ```
github_jupyter
# Better Long-Term Stock Forecasts by [Magnus Erik Hvass Pedersen](http://www.hvass-labs.org/) / [GitHub](https://github.com/Hvass-Labs/FinanceOps) / [Videos on YouTube](https://www.youtube.com/playlist?list=PL9Hr9sNUjfsmlHaWuVxIA0pKL1yjryR0Z) ## Introduction The [previous paper](https://github.com/Hvass-Labs/FinanceOps/blob/master/01_Forecasting_Long-Term_Stock_Returns.ipynb) showed a strong predictive relationship between the P/Sales ratio and long-term returns of some individual stocks and the S&P 500 stock-market index. However, there was a considerable amount of noise in those scatter-plots, because we considered fixed investment periods of exactly 10 years, for example. So even though the P/Sales ratio was a strong predictor for the mispricing at the buy-time, it was impossible to predict the mispricing at the sell-time, because the stock-market could be in a bubble or in a crash 10 years into the future, which would distort the estimated returns. This paper presents a simple solution, which is to consider the average returns for all investment periods between 7 and 15 years, and then make a scatter-plot of the mean returns versus the P/Sales ratio. This produces incredibly smooth curves for estimating the future long-term returns of the S&P 500 and some individual stocks. Along with the [previous paper](https://github.com/Hvass-Labs/FinanceOps/blob/master/01_Forecasting_Long-Term_Stock_Returns.ipynb), this is a very important discovery and it has implications for many areas of both theoretical and applied finance. It means that the U.S. stock-market as a whole is not "efficient" and does not follow a purely "random walk" in the long-term. It is possible to estimate the future long-term return of the stock-market and some individual stocks from just a single indicator variable. ## Python Imports This Jupyter Notebook is implemented in Python v. 3.6 and requires various packages for numerical computations and plotting. See the installation instructions in the README-file. ``` %matplotlib inline # Imports from Python packages. import matplotlib.pyplot as plt from matplotlib.ticker import FuncFormatter import pandas as pd import numpy as np import os # Imports from FinanceOps. from curve_fit import CurveFitReciprocal from data_keys import * from data import load_index_data, load_stock_data from returns import prepare_mean_ann_returns ``` ## Load Data We now load all the financial data we will be using. ``` # Define the ticker-names for the stocks we consider. ticker_SP500 = "S&P 500" ticker_JNJ = "JNJ" ticker_K = "K" ticker_PG = "PG" ticker_WMT = "WMT" # Load the financial data for the stocks. df_SP500 = load_index_data(ticker=ticker_SP500) df_JNJ = load_stock_data(ticker=ticker_JNJ) df_K = load_stock_data(ticker=ticker_K) df_PG = load_stock_data(ticker=ticker_PG) df_WMT = load_stock_data(ticker=ticker_WMT) ``` ## Plotting Functions These are helper-functions used for making plots. ``` def plot_psales(df, ticker, start_date=None): """ Plot the P/Sales ratio. :param df: Pandas DataFrame with PSALES. :param ticker: Ticker-name for the stock or index. :param start_date: Start-date for the plot. :return: Nothing. """ psales = df[PSALES][start_date:].dropna() psales.plot(title=ticker + " - P/Sales", grid=True) def plot_ann_returns(ticker, df, key=PSALES, min_years=7, max_years=15, use_colors=True): """ Create a single scatter-plot with P/Sales or P/Book vs. Mean Annualized Returns for e.g. 7-15 years. :param ticker: Ticker-name for the stock or index. 
:param df: Pandas DataFrame containing key and TOTAL_RETURN. :param key: Name of data-column to use e.g. PSALES or PBOOK. :param min_years: Min number of years for return periods. :param max_years: Max number of years for return periods. :param use_colors: Boolean whether to use colors in plot. :return: Nothing. """ # Prepare the data. # x is the P/Sales or P/Book and y is the Mean Ann. Returns. x, y = prepare_mean_ann_returns(df=df, key=key, min_years=min_years, max_years=max_years) # Create a single plot. fig = plt.figure(figsize=(10, 10)) ax = fig.add_subplot(211) # Scatter-plot. if use_colors: # Give each dot in the scatter-plot a shade of blue # according to the date of the data-point. ax.scatter(x, y, c=list(range(len(x))), cmap='Blues', alpha=1.0, marker='o') else: # Use the same color for all dots. ax.scatter(x, y, marker='o') # First part of the title. title1 = "[{0}] {1} vs. {2}-{3} Years Mean Ann. Return" title1 = title1.format(ticker, key, min_years, max_years) # X-values for plotting fitted curves. x_min = np.min(x) x_max = np.max(x) x_range = np.arange(x_min, x_max, (x_max/x_min)/1000) # Plot reciprocal curve-fit. curve_fit_reciprocal = CurveFitReciprocal(x=x, y=y) y_pred = curve_fit_reciprocal.predict(x=x_range) ax.plot(x_range, y_pred, color='red') # Title with these curve-fit parameters. title2 = "Mean Ann. Return = {0:.1%} / " + key + " + {1:.1%}" title2 = title2.format(*curve_fit_reciprocal.params) # Combine and set the plot-title. title = "\n".join([title1, title2]) ax.set_title(title) # Set axis labels. ax.set_xlabel(key) ax.set_ylabel("Mean Ann. Return") # Convert y-ticks to percentages. # We use a custom FuncFormatter because PercentFormatter # is inconsistent with string-formatters used elsewhere. formatter = FuncFormatter(lambda y, _: '{:.0%}'.format(y)) ax.yaxis.set_major_formatter(formatter) # Show grid. ax.grid() # Show the plot. plt.show() ``` ## Case Study: S&P 500 The S&P 500 is a stock-market index consisting of the stocks of 500 of the largest companies in USA. The S&P 500 covers about 80% of the whole U.S. stock-market in terms of size so it is useful as a gauge for the entire U.S. stock-market. We consider the Total Return of the S&P 500 which is what you would get from investing in the S&P 500 and re-investing all dividends back into the S&P 500. We ignore all taxes here. The following scatter-plot shows the P/Sales ratio versus the Mean Annualized Returns of the S&P 500 for periods between 7 and 15 years. For each day we calculate the Total Return of the S&P 500 over the next 7-15 years, then we calculate the Mean Annualized Return from those, and then we put a blue dot in the scatter-plot for that date's P/Sales ratio and the Mean Annualized Return we just calculated. This process is continued for all days in the time-series, until we have calculated and plotted the P/Sales vs. Mean Annualized Return for all days. As can be seen from this scatter-plot, the P/Sales ratio is a very strong predictor for long investment periods between 7-15 years. We call the fitted red curve for the "return curve". ``` plot_ann_returns(ticker=ticker_SP500, df=df_SP500, key=PSALES, min_years=7, max_years=15, use_colors=True) ``` We can forecast the future long-term returns using the fitted "return curve" from the scatter-plot above. Towards the end of 2017, the P/Sales ratio was almost 2.2 for the S&P 500, which was about the previous high point of the "Dot-Com" bubble around year 2000. 
``` df_SP500[PSALES].dropna().tail(1) plot_psales(df=df_SP500, ticker=ticker_SP500) ``` So if you purchased the S&P 500 in December 2017 at this P/Sales ratio and will keep the investment for more than 7 years, while reinvesting all dividends during those years (all taxes are ignored), then the formula forecasts an annualized return of about 1.35%: $$ Annualized\ Return = 14.4\% / (P/Sales) - 5.2\% = 14.4\% / 2.2 - 5.2\% \simeq 1.35\% $$ The formula cannot predict exactly what will happen in the future, because there might be a stock-market bubble or a crash in any given year. The formula merely predicts an average annualized return for long-term investments of about 7-15 years in the S&P 500. ## Case Study: Johnson & Johnson (JNJ) Now let us consider individual companies instead of a whole stock-market index. The first company we consider is Johnson & Johnson with the ticker symbol JNJ. This is a very large company with over 130.000 employees worldwide that manufacture a wide range of health-care related products. When we plot the P/Sales ratio versus the mean annualized return for 7-15 year periods, we see that the "return curve" fits quite well although there appears to be a few separate "return curves" for P/Sales ratios roughly between 2 and 3. The blue shades in the scatter-plot indicate the time of the data-points and suggest that the separate curves belong to different periods of time. More research would be needed to establish why these periods have different "return curves". Perhaps the periods had significantly different profit-margins or sales-growth. ``` plot_ann_returns(ticker=ticker_JNJ, df=df_JNJ, key=PSALES, min_years=7, max_years=15, use_colors=True) ``` Towards the end of 2017 the P/Sales ratio was about 4.9 which is close to the all-time historical highs experienced during the stock-market bubble around year 2000. ``` df_JNJ[PSALES].dropna().tail(1) plot_psales(df=df_JNJ, ticker=ticker_JNJ) ``` Using the formula for the fitted "return curve" from the scatter-plot above, we get this forecasted long-term return: $$ Annualized\ Return \simeq 77.9\% / (P/Sales) - 8.9\% \simeq 77.9\% / 4.9 - 8.9\% \simeq 7.0\% $$ So according to this formula, the annualized return of the JNJ stock will be around 7.0% if you own the stock for at least 7 years, when dividends are reinvested and ignoring taxes. Again there is the caveat that it is impossible to predict whether there will be a stock-market bubble or crash several years into the future, so the forecasted return is an average for 7-15 year investment periods. ## Case Study: Procter & Gamble (PG) Another very large company is Procter & Gamble with the ticker symbol PG, which sells a wide range of consumer products and has almost 100.000 employees. If we plot the P/Sales ratio versus the mean annualized return we get an incredibly regular curve of data-points. The red line shows a reciprocal curve-fit, which is apparently not the correct formula for this data, as it doesn't fit so well at the ends. You are encouraged to try and find a better curve-fit and a theoretical explanation why your formula is better. ``` plot_ann_returns(ticker=ticker_PG, df=df_PG, key=PSALES, min_years=7, max_years=15) ``` When we plot the historical P/Sales ratio, we see that at the end of 2017 it was around 3.5 which was near its all-time high experienced during the bubble around year 2000. 
``` plot_psales(df=df_PG, ticker=ticker_PG) ``` Using the fitted reciprocal curve from the scatter-plot above, we get a forecasted return of about 6.1% per year, when dividends are reinvested without taxes: $$ Annualized\ Return \simeq 24.4\% / (P/Sales) - 0.9\% \simeq 24.4\% / 3.5 - 0.9\% \simeq 6.1\% $$ But it should again be noted that this formula doesn't fit so well towards the ends of the data, and looking at the scatter-plot suggests a slightly lower return of maybe 5.5%. ## Case Study: Kellogg's (K) The next company is Kellogg's which trades under the ticker symbol K. The company has about 33.000 employees and is especially known for making breakfast cereals. When we plot the P/Sales ratio versus the mean annualized return it shows a strong trend that higher P/Sales ratios gives lower long-term returns, although the curve-fit is not as good as for the other companies we studied above, especially for lower P/Sales ratios. The blue shades show the time of the data-points. It can be hard to see in this plot, but for P/Sales ratios between 1.50 and 1.75, there is a "blob" of light-blue data-points well above the fitted red curve. This clearly indicates that the outlying data-points belong to a specific period in time. But we would have to do more research into the financial data for that period, to uncover the reason why the returns are so different. ``` plot_ann_returns(ticker=ticker_K, df=df_K, key=PSALES, min_years=7, max_years=15, use_colors=True) ``` Towards the end of 2017 the P/Sales ratio was about 1.8 which was actually very close to the historical average. ``` df_K[PSALES].dropna().mean() plot_psales(df=df_K, ticker=ticker_K) ``` Using the fitted "return curve" from the scatter-plot above with the P/Sales ratio of 1.8 we get the forecasted return: $$ Annualized\ Return \simeq 27.5\% / (P/Sales) - 6.2\% \simeq 27.5\% / 1.8 - 6.2\% \simeq 9.1\% $$ So a forecasted return of about 9.1% per year over the next 7-15 years when dividends are reinvested without taxes. That is about 2% (percentage points) higher than the return forecasted for JNJ and 3% higher than forecasted for PG above. ## Case Study: Wal-Mart (WMT) Now let us consider the company Wal-Mart which trades under the ticker symbol WMT. It is an extremely large retail-company with about 2.3 million employees. If we plot the P/Sales ratio versus the mean annualized return, we see that the red curve fits very poorly. There seems to be several separate trends in the data, and the blue shades indicate that the trends belong to different periods in time. But more research into the company's financial history would be needed to uncover the reason for this, perhaps it is because of significantly different sales-growth, profit margins, etc. ``` plot_ann_returns(ticker=ticker_WMT, df=df_WMT, key=PSALES, min_years=7, max_years=15, use_colors=True) ``` ## Conclusion We have shown that the P/Sales ratio is a very strong predictor for the long-term returns of the S&P 500 index and some individual stocks. In the [previous paper](https://github.com/Hvass-Labs/FinanceOps/blob/master/01_Forecasting_Long-Term_Stock_Returns.ipynb) we considered fixed investment periods of e.g. 10 years, which meant that the investment return depended on the P/Sales ratio both at the time of buying and selling. This distorted the data because sometimes the stock-market would be in a bubble or crash 10 years later. 
In this paper we presented a simple solution by considering all investment periods between 7 and 15 years, and then using the average return instead. This averages out the distorting effects of future bubbles and crashes, so we get much more smooth data that only depends on the P/Sales ratio at the buy-time. We then fitted a reciprocal "return curve" to the scatter-plots, and although it generally had a very tight fit, it was not so accurate towards the end-points, thus suggesting that the reciprocal formula is not entirely correct for this data. It would be of great interest to not only find a mathematical model that fits better, but also a theoretical explanation why that model makes sense. Perhaps such a model would also allow us to use smaller amounts of data and take into account the changing economics of a business. Perhaps we could use such a model to forecast returns of more companies where the basic method does not work so well, such as Wal-Mart as demonstrated above. It should be stressed that the forecasted returns will also depend on a *qualitative* assessment of the company. If the company's future will be significantly different from its historical sales, profit-margins and growth, then the forecasted returns will be inaccurate. That is why this forecasting method is perhaps best used on broad stock-market indices such as the S&P 500, or companies whose products and markets are expected to be highly predictable long into the future. ## Research Ideas You are strongly encouraged to do more research on this topic. If you make any new discoveries then please let me know your results. To my knowledge, there are no academic studies of predicting the long-term returns of stocks and stock-markets as we have done here. This work has presented the basic idea and methodology, but a lot more research can be done on this subject and it may impact many areas of both theoretical and applied finance. Here are a few more research ideas to get you started, in addition to the ideas from the [previous paper](https://github.com/Hvass-Labs/FinanceOps/blob/master/01_Forecasting_Long-Term_Stock_Returns.ipynb): - Try other investment periods, for example 5 to 10 years. How does it change the scatter-plots and the fitted "return curves"? - Try using P/Book as the predictor signal. How does that affect the plots? Why? - Although the data in some of these scatter-plots is incredibly smooth, the reciprocal curve does not fit the data exactly, which suggests that it is the wrong formula for this data. Can you find a better formula and perhaps give a theoretical explanation why that is better? - What is the reason that some companies such as Wal-Mart have several different trend-lines in the scatter-plot? You will probably need to investigate the historical financial data to uncover the reason. Can you modify the forecasting method to somehow take this into account? 
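As a possible starting point for the curve-fitting research idea above, the sketch below fits a power-law form with an offset using scipy instead of the reciprocal curve used in this paper. The functional form and the initial guess `p0` are only illustrative assumptions, not a recommendation; the sketch reuses `prepare_mean_ann_returns` and the S&P 500 data loaded earlier in this notebook.

```
from scipy.optimize import curve_fit

# P/Sales vs. mean annualized returns for the S&P 500, as used in the scatter-plots above
x, y = prepare_mean_ann_returns(df=df_SP500, key=PSALES,
                                min_years=7, max_years=15)

# One candidate alternative to the reciprocal fit: y = a * x^b + c
def power_law(x, a, b, c):
    return a * np.power(x, b) + c

# Initial guess p0 is an arbitrary assumption; maxfev is raised to help convergence
params, _ = curve_fit(power_law, x, y, p0=[0.1, -1.0, 0.0], maxfev=10000)
print("Fitted power-law parameters (a, b, c):", params)
```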
## License (MIT) Copyright (c) 2015-18 by [Magnus Erik Hvass Pedersen](http://www.hvass-labs.org/) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
github_jupyter
``` %load_ext autoreload %autoreload 2 %aimport utils_1_1 import pandas as pd import numpy as np import altair as alt from altair_saver import save import datetime import dateutil.parser from os.path import join from constants_1_1 import SITE_FILE_TYPES from utils_1_1 import ( read_loinc_df, get_site_file_paths, get_site_file_info, get_site_ids, read_full_daily_counts_df, get_visualization_subtitle, get_country_color_map, apply_theme, merge_single_site_country_adult_name, ) from web import for_website alt.data_transformers.disable_max_rows(); # Allow using rows more than 5000 ``` ## The 4CE Health Systems Participating spreadsheet must be downloaded from Google Sheets and moved to ``` data/Health_Systems_Participating.csv ``` because we need the number of hospitals, beds, and inpatient discharges per year in order to compute the country-level rates of change. ``` isHospitalizationData = False # Use hospitalization data? min_date = datetime.datetime(2020, 1, 27) + datetime.timedelta(hours=1) max_date = datetime.datetime(2020, 9, 29) + datetime.timedelta(hours=1) DATA_RELEASE = "2020-09-28" COHORT = "Adult" MERGE_SINGLE_SITE_COUNTRIES = False CATEGORY = "category" CATEGORY_OF_INTEREST = "new_positive_cases" COUNTRY_POPULATION = { # From https://data.worldbank.org/indicator/SP.POP.TOTL "France": 67059887, "USA": 328239523, "Germany": 83132799, "Italy": 60297396, "Singapore": 5703569, "Spain": 47076781, "UK": 66834405, } COUNTRY_HOSP_DISCHARGE = { # From https://data.oecd.org/healthcare/hospital-discharge-rates.htm "France": 18553.0, "USA": 10906.2, # https://hcup-us.ahrq.gov/faststats/NationalTrendsServlet "Germany": 25478.4, "Italy": 11414.6, "Singapore": 12700.4, # https://www.moh.gov.sg/resources-statistics/healthcare-institution-statistics/hospital-admission-rates-by-age-and-sex/hospital-admission-rates-by-age-and-sex-2017 "Spain": 10470.5, "UK": 12869.4, } # Un-comment if you want to merge Spain and Singapore #COUNTRY_POPULATION["Spain + Singapore"] = COUNTRY_POPULATION["Spain"] + COUNTRY_POPULATION["Singapore"] #COUNTRY_HOSP_DISCHARGE["Spain + Singapore"] = COUNTRY_HOSP_DISCHARGE["Spain"] + COUNTRY_HOSP_DISCHARGE["Singapore"] df = read_full_daily_counts_df() df.head() ``` ## Remove pediatric sites ``` df = df.loc[df["pediatric"] == False] df = df.drop(columns=["pediatric"]) df.head() # Remove RP401 non-pediatric data since RP401 is only listed as a pediatric site df = df.loc[~df["siteid"].isin(["RP401"])] df = df.replace(-99, np.nan) df = df.replace(-999, np.nan) df["num_sites"] = 1 # We only need the JHU data for the countries that exist in the 4CE data. 
COUNTRIES = df["country"].unique().tolist() COUNTRIES df["date"] = df["calendar_date"] df = df.drop(columns=["calendar_date"]) ``` ## Load participating sites metadata ``` sites_df = pd.read_csv(join("..", "data", "Health_Systems_Participating.tsv"), sep='\t', skiprows=2, header=None, thousands=',') sites_column_map = { 0: "site_name", 1: "siteid", 2: "city", 3: "country", 4: "patient_type", 6: "adult_num_hosp", 7: "adult_num_beds", 8: "adult_num_yearly_discharge", 10: "ped_num_hosp", 11: "ped_num_beds", 12: "ped_num_yearly_discharge", } sites_df = sites_df.rename(columns=sites_column_map) sites_df = sites_df[list(sites_column_map.values())] sites_df["pediatric"] = sites_df["patient_type"].apply(lambda t: t == "Pediatric") sites_df = sites_df.dropna(subset=["site_name"]) sites_df["siteid"] = sites_df["siteid"].apply(lambda x: x.upper()) sites_df.tail() # Drop the pediatric hospitals sites_df = sites_df.loc[sites_df["pediatric"] == False] ``` ## Take intersection of sites that have provided valid num_yearly_discharge counts and sites that have provided daily counts data ``` sites_df["adult_num_hosp"] = sites_df["adult_num_hosp"].apply(lambda x: str(x).replace(",", "")).astype(float) sites_df["adult_num_beds"] = sites_df["adult_num_beds"].apply(lambda x: str(x).replace(",", "")).astype(float) sites_df["adult_num_yearly_discharge"] = sites_df["adult_num_yearly_discharge"].apply(lambda x: str(x).replace(",", "")).astype(float) sites_df = sites_df.set_index("siteid") def get_num_hosp(sid): try: return sites_df.at[sid, "adult_num_hosp"] if pd.notna(sites_df.at[sid, "adult_num_hosp"]) else 1 except KeyError: return 1 df["num_hosps"] = df["siteid"].apply(get_num_hosp) sites_df.head() sites_df = sites_df.reset_index() sites_df = sites_df.dropna(subset=["adult_num_yearly_discharge"]) sites_in_sites_df = sites_df["siteid"].unique().tolist() sites_in_df = df["siteid"].unique().tolist() intersecting_sites = set(sites_in_sites_df).intersection(set(sites_in_df)) sites_df = sites_df.loc[sites_df["siteid"].isin(intersecting_sites)] df = df.loc[df["siteid"].isin(intersecting_sites)] intersecting_sites # Get number of sites after restricting to pediatrics # and after taking the intersection NUM_SITES = len(df["siteid"].unique().tolist()) sites_in_sites_df ``` ## If site is missing data for a particular date, use the most recent previous data point for that date ``` def convert_date(date_str): try: return dateutil.parser.parse(date_str) except: return np.nan max_date_str = str(max_date).split(" ")[0] all_date_country_df = pd.DataFrame() for siteid, cd_df in df.groupby(["siteid"]): min_date = cd_df["date"].min() min_date_str = str(min_date).split(" ")[0] num_days = (dateutil.parser.parse(max_date_str) - dateutil.parser.parse(min_date_str)).days cd_df = cd_df.copy() cd_df["date"] = cd_df["date"].astype(str) prev_date_row = None for day_offset in range(num_days): curr_date = dateutil.parser.parse(min_date_str) + datetime.timedelta(days=day_offset) curr_date_str = str(curr_date).split(" ")[0] try: curr_date_row = cd_df.loc[cd_df["date"] == curr_date_str].to_dict('records')[0] prev_date_row = curr_date_row except: prev_date_row['date'] = curr_date_str prev_date_row['num_sites'] = 0 prev_date_row['num_hosps'] = 0 cd_df = cd_df.append(prev_date_row, ignore_index=True) all_date_country_df = all_date_country_df.append(cd_df, ignore_index=True) all_date_country_df["date"] = all_date_country_df["date"].apply(convert_date) df = all_date_country_df ``` ## Subtract severe patients from all patients to get the "never 
severe"-like count ``` country_color_map = get_country_color_map(merge_single_site_countries=MERGE_SINGLE_SITE_COUNTRIES) if MERGE_SINGLE_SITE_COUNTRIES: df["country"] = df["country"].apply(merge_single_site_country_adult_name) country_sum_df = df.groupby(["country", "date"]).sum().reset_index() country_sum_df.head() COUNTRIES = country_sum_df["country"].unique().tolist() country_sum_df.tail() country_sum_df["num_patients_in_hospital_on_this_date_minus_severe"] = df["num_patients_in_hospital_on_this_date"] - df["num_patients_in_hospital_and_severe_on_this_date"] country_sum_df["cumulative_patients_all_minus_severe"] = df["cumulative_patients_all"] - df["cumulative_patients_severe"] country_sum_temp_df = pd.DataFrame(index=[], data=[], columns=[]) for country, country_df in country_sum_df.groupby("country"): country_df = country_df.copy() country_df["cum_diff_all"] = np.concatenate((np.array([np.nan]), np.diff(country_df["cumulative_patients_all"].values))) country_df["cum_diff_severe"] = np.concatenate((np.array([np.nan]), np.diff(country_df["cumulative_patients_severe"].values))) country_df["cum_diff_dead"] = np.concatenate((np.array([np.nan]), np.diff(country_df["cumulative_patients_dead"].values))) country_df["cum_diff_all_minus_severe"] = country_df["cum_diff_all"] - country_df["cum_diff_severe"] country_df["cum_diff_all"] = country_df["cum_diff_all"].clip(lower=0) country_df["cum_diff_severe"] = country_df["cum_diff_severe"].clip(lower=0) country_df["cum_diff_dead"] = country_df["cum_diff_dead"].clip(lower=0) country_df["cum_diff_all_minus_severe"] = country_df["cum_diff_all_minus_severe"].clip(lower=0) """ country_df["count"] = np.concatenate((np.array([np.nan]), np.diff(country_df["cumulative_count"].values))) country_df["cumulative_count"] = country_df["cumulative_count"].replace(0, np.nan) country_df["N0"] = country_df["cumulative_count"].shift(1) # N0 is the total case up to the day before country_df["n1"] = country_df["count"] # n1 is the case number this day country_df["n2"] = country_df["n1"].shift(1) # n2 is the case number yesterday country_df["percent_increase"] = (country_df["n1"] / country_df["N0"]) * 100 country_df['R'] = country_df["percent_increase"] # TODO: is this correct? 
# TODO: update CI formula country_df['C'] = country_df['R'] - 1 country_df['standard_error'] = country_df.apply(lambda obs: (obs['R']+np.power(obs['R'], 2))/obs['n2'], axis='columns') country_df['95_CI_below'] = country_df.apply(lambda obs: obs['C'] - 1.96*np.sqrt(obs['standard_error']), axis='columns') country_df['95_CI_above'] = country_df.apply(lambda obs: obs['C'] + 1.96*np.sqrt(obs['standard_error']), axis='columns') country_df = country_df.replace([np.inf, -np.inf], np.nan) """ country_sum_temp_df = country_sum_temp_df.append(country_df, ignore_index=True) country_sum_df = country_sum_temp_df country_sum_df.tail() ``` ## Obtain country-level daily counts from JHU CSSE ``` jhu_url = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv" jhu_df = pd.read_csv(jhu_url) jhu_df = jhu_df.rename(columns={"Country/Region": "country", "Province/State": "state"}) jhu_df = jhu_df.drop(columns=["Lat", "Long"]) # Countries have different ids in the JHU data than in the 4CE data country_map = { "US": "USA", "United Kingdom": "UK" } jhu_df["country"] = jhu_df["country"].apply(lambda c: country_map[c] if c in country_map else c) jhu_df = jhu_df.loc[~pd.notna(jhu_df["state"])] jhu_df = jhu_df.drop(columns=["state"]) if MERGE_SINGLE_SITE_COUNTRIES: jhu_df["country"] = jhu_df["country"].apply(merge_single_site_country_adult_name) jhu_df = jhu_df.loc[jhu_df["country"].isin(COUNTRIES)] jhu_df = jhu_df.melt(id_vars=["country"], var_name="date", value_name="cumulative_count") jhu_df["date"] = jhu_df["date"].astype(str) jhu_df = jhu_df.groupby(["country", "date"]).sum().reset_index() ######################################################## # If you want to use public hospitalization data, add here! ######################################################## if(isHospitalizationData): ### France fr = pd.read_csv("../data/hospitalization_france.csv") fr.date = fr.date.astype(str) fr.new_hospitalization = fr.apply(lambda x: x.new_hospitalization_per_100000 / 100000 * COUNTRY_POPULATION[x.country] / 7, axis='columns') fr = fr.rename(columns={'new_hospitalization': 'cumulative_count'}) fr.cumulative_count = fr.cumulative_count.cumsum() # already sorted by date, so no need to sort fr.to_csv("../data/hospitalization_france_calculated.csv") jhu_df = jhu_df[jhu_df.country != 'France'] jhu_df = jhu_df.append(fr) ### Germany de = pd.read_csv("../data/hospitalization_germany.csv") de.date = de.date.astype(str) de.new_hospitalization = de.apply(lambda x: x.new_hospitalization_per_100000 / 100000 * COUNTRY_POPULATION[x.country] / 7, axis='columns') de = de.rename(columns={'new_hospitalization': 'cumulative_count'}) de.cumulative_count = de.cumulative_count.cumsum() # already sorted by date, so no need to sort de.to_csv("../data/hospitalization_germany_calculated.csv") jhu_df = jhu_df[jhu_df.country != 'Germany'] jhu_df = jhu_df.append(de) ### USA us = pd.read_csv("../data/hospitalization_usa.csv") us.date = us.date.astype(str) #... 
# we already have raw new hospitalization admission data us = us.rename(columns={'new_hospitalization': 'cumulative_count'}) us.cumulative_count = us.cumulative_count.cumsum() # already sorted by date, so no need to sort us.to_csv("../data/hospitalization_usa_calculated.csv") jhu_df = jhu_df[jhu_df.country != 'USA'] jhu_df = jhu_df.append(us) ### Spain es = pd.read_csv("../data/hospitalization_spain.csv") es.date = es.date.astype(str) es.new_hospitalization = es.apply(lambda x: x.new_hospitalization_per_100000 / 100000 * COUNTRY_POPULATION[x.country] / 7, axis='columns') es = es.rename(columns={'new_hospitalization': 'cumulative_count'}) es.cumulative_count = es.cumulative_count.cumsum() # already sorted by date, so no need to sort es.to_csv("../data/hospitalization_spain_calculated.csv") es.country = 'Spain + Singapore' jhu_df = jhu_df[jhu_df.country != 'Spain + Singapore'] jhu_df = jhu_df.append(es) ######################################################## # End of data replacement ######################################################## jhu_df["date"] = jhu_df["date"].apply(convert_date) jhu_df = jhu_df.sort_values(by="date", ascending=True) jhu_df = jhu_df.loc[(jhu_df["date"] >= min_date) & (jhu_df["date"] <= max_date)] jhu_df["date_str"] = jhu_df["date"].astype(str) jhu_df_freeze = jhu_df.copy() jhu_roc_df = pd.DataFrame(index=[], data=[], columns=["country", "date", "cumulative_count"]) for country, country_df in jhu_df.groupby("country"): country_df = country_df.copy() country_df["count"] = np.concatenate((np.array([np.nan]), np.diff(country_df["cumulative_count"].values))) country_df["cumulative_count"] = country_df["cumulative_count"].replace(0, np.nan) country_df["N0"] = country_df["cumulative_count"].shift(1) # N0 is the total case up to the day before country_df["n1"] = country_df["count"] # n1 is the case number this day country_df["n2"] = country_df["n1"].shift(1) # n2 is the case number yesterday country_df["percent_increase"] = (country_df["n1"] / country_df["N0"]) * 100 country_df['R'] = country_df["percent_increase"] # TODO: is this correct? # TODO: update CI formula country_df['C'] = country_df['R'] - 1 country_df['standard_error'] = country_df.apply(lambda obs: (obs['R']+np.power(obs['R'], 2))/obs['n2'], axis='columns') country_df['95_CI_below'] = country_df.apply(lambda obs: obs['C'] - 1.96*np.sqrt(obs['standard_error']), axis='columns') country_df['95_CI_above'] = country_df.apply(lambda obs: obs['C'] + 1.96*np.sqrt(obs['standard_error']), axis='columns') country_df = country_df.replace([np.inf, -np.inf], np.nan) jhu_roc_df = jhu_roc_df.append(country_df, ignore_index=True) jhu_roc_df def get_jhu_cumulative_count(date_str, country): try: return jhu_roc_df.loc[(jhu_roc_df["date_str"] == date_str) & (jhu_roc_df["country"] == country)].reset_index().iloc[0]["cumulative_count"] except: return 0 # Start plotting after country has 100 cases count_threshold = 100 jhu_roc_df["jhu_past_100"] = jhu_roc_df["cumulative_count"] >= count_threshold # Transform the 4CE data to obtain normalized change values. 
def compute_change_4ce(df_dc, cumulative_count_colname, daily_count_colname, category): df_dc["cumulative_count"] = df_dc[cumulative_count_colname] df_dc["count"] = df_dc[daily_count_colname] # Sort dates df_dc["date"] = df_dc["date"].astype(str) df_dc["date"] = df_dc["date"].apply(convert_date) df_dc = df_dc.sort_values(by="date", ascending=True) df_dc = df_dc.loc[(df_dc["date"] >= min_date) & (df_dc["date"] <= max_date)] df_dc["date_str"] = df_dc["date"].astype(str) df_dc_freeze = df_dc.copy() dc_roc_df = pd.DataFrame(index=[], data=[], columns=["country", "date"]) for country, country_df in df_dc.groupby("country"): country_df = country_df.copy() country_df["N0"] = country_df["cumulative_count"].shift(1) # N0 is the total case up to the day before country_df["n1"] = country_df["count"] # n1 is the case number this day country_df["n2"] = country_df["n1"].shift(1) # n2 is the case number yesterday country_df["percent_increase"] = (country_df["n1"] / country_df["N0"]) * 100 country_df['R'] = country_df["percent_increase"] # TODO: is this correct? # TODO: update CI formula country_df['C'] = country_df['R'] - 1 country_df['standard_error'] = country_df.apply(lambda obs: (obs['R']+np.power(obs['R'], 2))/obs['n2'], axis='columns') country_df['95_CI_below'] = country_df.apply(lambda obs: obs['C'] - 1.96*np.sqrt(obs['standard_error']), axis='columns') country_df['95_CI_above'] = country_df.apply(lambda obs: obs['C'] + 1.96*np.sqrt(obs['standard_error']), axis='columns') country_df = country_df.replace([np.inf, -np.inf], np.nan) dc_roc_df = dc_roc_df.append(country_df, ignore_index=True) dc_roc_df.head() dc_roc_df = dc_roc_df[["country", "date", "num_sites", "num_hosps", "cumulative_count", "count", "date_str", "N0", "n1", "n2", "percent_increase", "R", "C", "standard_error", "95_CI_below", "95_CI_above"]] dc_roc_df["category"] = category dc_roc_df["jhu_past_100"] = dc_roc_df.apply(lambda row: get_jhu_cumulative_count(row["date_str"], row["country"]) >= count_threshold, axis='columns') return dc_roc_df dc_roc_df_all = compute_change_4ce(country_sum_df, "cumulative_patients_all", "cum_diff_all", "All") dc_roc_df_severe = compute_change_4ce(country_sum_df, "cumulative_patients_severe", "cum_diff_severe", "Severe") dc_roc_df_all_minus_severe = compute_change_4ce(country_sum_df, "cumulative_patients_all_minus_severe", "cum_diff_all_minus_severe", "All minus Severe") dc_roc_df = dc_roc_df_all.append(dc_roc_df_severe, ignore_index=True).append(dc_roc_df_all_minus_severe, ignore_index=True) dc_roc_df.to_csv("../data/dc_roc_df.csv") ``` ### Transform data for plots faceted by country ``` jhu_roc_df = jhu_roc_df.copy() dc_roc_df = dc_roc_df.copy() jhu_roc_df["source"] = "JHU CSSE" dc_roc_df["source"] = "4CE" join_df = jhu_roc_df.append(dc_roc_df, ignore_index=True) join_df["country_source"] = join_df.apply(lambda row: row["country"] + "_" + row["source"], axis='columns') join_df.head() ``` ## Normalized New Daily Cases First, obtain total hospital discharges for each country. $\texttt{country_total} = \text{country total in-patient-discharge}$ $\texttt{country_4CE_total} = \text{total in-patient-discharge in our sites within that country}$ $F0 = \frac{\texttt{country_total}}{\texttt{country_4CE_total}}$ $F0$ is used to normalize. 
- For new figure that shows daily case # per 100K, we will instead show - $\texttt{RATE} = \texttt{N_case} * F1$ - where $F1 = F0 * \frac{100K}{\texttt{country population}}$ - then the standard error for $\texttt{RATE}$ will be $\sqrt(\texttt{RATE}*F1)$ and the confidence interval will be $\texttt{RATE} \pm 1.96*\sqrt(\texttt{RATE}*F1)$ ``` # Get daily new cases from cumulative_count norm_jhu = jhu_roc_df.copy() # Ensure not to calculate diff between country # norm_jhu["count"] = norm_jhu["cumulative_count"].diff() u_countries = norm_jhu.country.unique() for c in u_countries: norm_jhu.loc[norm_jhu.country == c, "count"] = norm_jhu.loc[norm_jhu.country == c, "cumulative_count"].diff() norm_jhu.loc[norm_jhu["date"] == "2020-01-28", "count"] = np.nan # Make sure the start count is NaN norm_jhu[norm_jhu.country == "Germany"] norm_4ce = dc_roc_df.copy() ``` ## Compute F0 value for each country using COUNTRY_POPULATION numbers ``` country_sites_df = sites_df.groupby("country").sum().reset_index() country_sites_df = country_sites_df[["country", "adult_num_hosp", "adult_num_beds", "adult_num_yearly_discharge"]] country_sites_df = country_sites_df.rename(columns={ "adult_num_hosp": "4ce_num_hosp", "adult_num_beds": "4ce_num_beds", "adult_num_yearly_discharge": "4ce_num_yearly_discharge", }) country_sites_df["country_num_yearly_discharge_per_100000"] = country_sites_df["country"].apply(lambda c: COUNTRY_HOSP_DISCHARGE[c]) country_sites_df["country_population"] = country_sites_df["country"].apply(lambda c: COUNTRY_POPULATION[c]) if MERGE_SINGLE_SITE_COUNTRIES: country_sites_df["country"] = country_sites_df["country"].apply(merge_single_site_country_adult_name) country_sites_df = country_sites_df.groupby(["country"]).sum().reset_index() country_sites_df["F1"] = country_sites_df["country_num_yearly_discharge_per_100000"] / country_sites_df["4ce_num_yearly_discharge"] country_sites_df["F0"] = country_sites_df["F1"] * (country_sites_df["country_population"] / 100000) country_sites_df country_sites_df.to_csv("../data/CASE_RATE_CONSTANTS.csv") F0 = dict(zip(country_sites_df["country"].values.tolist(), country_sites_df["F0"].values.tolist())) F0 # Append the F0 values to each df norm_jhu["population"] = norm_jhu["country"].apply(lambda x: COUNTRY_POPULATION[x]) norm_jhu["F0"] = norm_jhu["country"].apply(lambda x: F0[x]) norm_4ce["population"] = norm_4ce["country"].apply(lambda x: COUNTRY_POPULATION[x]) norm_4ce["F0"] = norm_4ce["country"].apply(lambda x: F0[x]) # Compute adjusted counts norm_jhu["adjusted_count"] = norm_jhu["count"] norm_jhu["F1"] = 100000 / norm_jhu["population"] norm_jhu["RATE"] = norm_jhu["count"] * norm_jhu["F1"] norm_jhu["RATE_7_day_avg"] = norm_jhu["RATE"].rolling(7).mean().shift(-3) norm_jhu["std_error"] = norm_jhu["F1"] * norm_jhu["RATE"] norm_jhu["std_error"] = norm_jhu["std_error"].apply(lambda x: np.sqrt(x)) norm_jhu["ci_above"] = norm_jhu["RATE_7_day_avg"] + 1.96 * norm_jhu["std_error"] norm_jhu["ci_below"] = norm_jhu["RATE_7_day_avg"] - 1.96 * norm_jhu["std_error"] norm_4ce["adjusted_count"] = norm_4ce["F0"] * norm_4ce["count"] norm_4ce["F1"] = norm_4ce["F0"] * 100000 / norm_4ce["population"] norm_4ce["RATE"] = norm_4ce["count"] * norm_4ce["F1"] for c in COUNTRIES: c_filter = norm_4ce["country"] == c norm_4ce.loc[c_filter, "RATE_7_day_avg"] = norm_4ce.loc[c_filter, "RATE"].rolling(7).mean().shift(-3) # norm_4ce["RATE_7_day_avg"] = norm_4ce["RATE"].rolling(7).mean().shift(-3) norm_4ce["std_error"] = norm_4ce["F1"] * norm_4ce["RATE"] norm_4ce["std_error"] = 
norm_4ce["std_error"].apply(lambda x: np.sqrt(x)) norm_4ce["ci_above"] = norm_4ce["RATE_7_day_avg"] + 1.96 * norm_4ce["std_error"] norm_4ce["ci_below"] = norm_4ce["RATE_7_day_avg"] - 1.96 * norm_4ce["std_error"] norm_jhu_min_col = norm_jhu[['country', 'date', 'count', 'adjusted_count', 'RATE_7_day_avg', 'source', 'population', 'F0', 'F1', 'RATE', 'std_error', 'ci_above', 'ci_below', 'jhu_past_100']] norm_4ce_min_col = norm_4ce[['country', 'date', 'count', 'adjusted_count', 'RATE_7_day_avg', 'source', 'population', 'F0', 'F1', 'RATE', 'std_error', 'ci_above', 'ci_below', 'jhu_past_100', 'num_sites', 'num_hosps', 'category']] norm_jhu_min_col.head(10) norm_4ce_min_col.head(10) ``` ## Temporary: select the All category of 4CE only (ignoring severity) ``` norm_4ce_min_col = norm_4ce_min_col.loc[norm_4ce_min_col["category"] != "All minus Severe"] norm_jhu_min_col = norm_jhu_min_col.copy() norm_4ce_min_col = norm_4ce_min_col.copy() print(norm_4ce_min_col["category"].unique()) if isHospitalizationData: norm_jhu_min_col["category"] = "Hospital admission rate" else: norm_jhu_min_col["category"] = "JHU CSSE: Positive Cases" norm_4ce_min_col["category"] = norm_4ce_min_col["category"].apply(lambda x: "4CE: " + x) norm_df = norm_jhu_min_col.append(norm_4ce_min_col, ignore_index=True) norm_df["country_source"] = norm_df.apply(lambda row: row["country"] + "_" + row["source"], axis='columns') norm_df.to_csv("../data/norm_df.csv") norm_fce_df = norm_df.loc[norm_df['source'] == '4CE'].copy() norm_df norm_df["category"].unique() ``` ## Temporary: drop Spain since so few discharges per year, it causes the rates to be way off ``` #norm_df = norm_df.loc[norm_df["country"] != "Spain"] #norm_fce_df = norm_fce_df.loc[norm_fce_df["country"] != "Spain"] min_date_3 = min_date + datetime.timedelta(days=3) max_date_3 = max_date - datetime.timedelta(days=3) norm_df = norm_df.loc[(norm_df["jhu_past_100"]) & (norm_df["date"] >= min_date_3) & (norm_df["date"] <= max_date_3)] title = "Country-Level Positive Case Rate, Comparison to JHU CSSE Data" # Selection source_selection = alt.selection_multi(fields=["source"], bind="legend") min_date = norm_df["date"].min() max_date = norm_df["date"].max() norm_fce_df = norm_fce_df.loc[(norm_fce_df["date"] >= min_date) & (norm_fce_df["date"] <= max_date)] # Domains date_domain = [alt.DateTime(year=min_date.year, month=min_date.month, date=min_date.day), alt.DateTime(year=max_date.year, month=max_date.month, date=max_date.day)] sites_domain = [0, norm_fce_df["num_hosps"].max() + 1] patients_domain = [0, norm_fce_df["count"].max() + 1] rate_domain = [0, norm_fce_df["RATE_7_day_avg"].max() + 1] country_names = COUNTRIES COUNTRY_COLORS = [country_color_map[c] for c in country_names] country_source_names = [c + "_" + "4CE" for c in country_names] + [c + "_" + "JHU CSSE" for c in country_names] color_scale = alt.Scale(domain=country_names, range=COUNTRY_COLORS) join_color_scale = alt.Scale(domain=country_source_names, range=COUNTRY_COLORS + ["#707070"] * len(country_names)) country_width = 170 nearest = alt.selection_single(encodings=['x', 'y'], on="mouseover", nearest=True, empty="none", clear="mouseout") y_selection = alt.selection_interval(encodings=["y"], bind="scales", init={"y": rate_domain}) date_brush = alt.selection(type='interval', encodings=['x']) # Additional Visual Elements tooltip = [ alt.Tooltip("source", title="Data source"), alt.Tooltip("country", title="Country"), alt.Tooltip("count", title="Daily Cases"), alt.Tooltip("adjusted_count", title="Adjusted Daily Cases"), 
alt.Tooltip("RATE_7_day_avg", title="Daily Case Rate, 7-day Average", format=".2f"), alt.Tooltip("date", title="Date"), alt.Tooltip("ci_below", title="95% CI upper bound", format=".2f"), alt.Tooltip("ci_above", title="95% CI lower bound", format=".2f") ] rule = alt.Chart().mark_rule(color="red", size=0.5).encode( x="date:T" ).transform_filter( nearest ) line = alt.Chart(norm_df).transform_filter(source_selection).mark_line(opacity=0.7).encode( x=alt.X("date:T", title=None, axis=alt.Axis(labelBound=True), scale=alt.Scale(padding=5)), y=alt.Y("RATE_7_day_avg:Q", axis=alt.Axis(title="Adjusted daily case rate, 7 day average"), scale=alt.Scale(zero=False, nice=False, domain=rate_domain, padding=5)), strokeDash=alt.StrokeDash("source:N", scale=alt.Scale(domain=["4CE", "JHU CSSE"], range=[[0,0], [3,3]]), legend=alt.Legend(title="Data Source")), color=alt.Color("country_source:N", scale=join_color_scale, legend=None), tooltip=tooltip ).properties(width=country_width, height=200) errorband = line.transform_filter(alt.datum["source"] == "4CE").mark_errorband().encode( x=alt.X(f"date:T", title=None, axis=alt.Axis(labelBound=True)), y=alt.Y(f"sum(ci_below):Q", title=""), y2=alt.Y2(f"sum(ci_above):Q", title=""), color=alt.Color(f"country:N", scale=color_scale, legend=alt.Legend(title=None)), tooltip=tooltip ) circle = ( line.mark_circle() .encode( size=alt.condition(~nearest, alt.value(5), alt.value(30)) ) .add_selection(nearest) ) num_sites_bar_bg = ( alt.Chart(norm_fce_df) .mark_bar(size=2) .encode( x=alt.X("date:T", scale=alt.Scale(domain=date_domain, padding=5), title=None, axis=alt.Axis(labelBound=True)), y=alt.Y("num_sites:Q", axis=alt.Axis(title="# of sites"), scale=alt.Scale(domain=sites_domain)), color=alt.value("gray"), tooltip=tooltip ) .properties(width=country_width, height=60) ) num_sites_bar = ( num_sites_bar_bg .encode( color=alt.Color("country:N", scale=color_scale, legend=None), ) .transform_filter(date_brush) ) num_patients_bar_bg = ( alt.Chart(norm_fce_df) .mark_bar(size=2) .encode( x=alt.X("date:T", scale=alt.Scale(domain=date_domain, padding=5), title=None, axis=alt.Axis(labelBound=True)), y=alt.Y("count:Q", axis=alt.Axis(title="# of new cases"), scale=alt.Scale(domain=patients_domain)), color=alt.value("gray"), tooltip=tooltip ) .properties(width=country_width, height=60) ) num_patients_bar = ( num_patients_bar_bg .encode( color=alt.Color("country:N", scale=color_scale, legend=None), ) .transform_filter(date_brush) ) top = ( alt.layer(line, errorband, circle, rule, data=norm_df) .facet( column=alt.Column("country:N"), bounds="flush" #header=alt.Header(labels=False) ) .add_selection(y_selection) .transform_filter(date_brush) ) num_sites_bottom = ( alt.layer(num_sites_bar_bg, num_sites_bar, rule, data=norm_fce_df) .facet( column=alt.Column("country:N", header=alt.Header(labels=False)), bounds="flush" ) .add_selection(nearest) .add_selection(date_brush) ) num_patients_bottom = ( alt.layer(num_patients_bar_bg, num_patients_bar, rule, data=norm_fce_df) .facet( column=alt.Column("country:N", header=alt.Header(labels=False)), bounds="flush" ) .add_selection(nearest) .add_selection(date_brush) ) plot = ( alt.vconcat(top, num_patients_bottom, num_sites_bottom, spacing=5) .resolve_scale(color="shared", x="independent") .properties(title={ "text": title, "subtitle": get_visualization_subtitle(data_release=DATA_RELEASE, cohort=COHORT, num_sites=NUM_SITES), "subtitleColor": "gray", "dx": 60 }) .add_selection(source_selection) ) plot = apply_theme( plot, axis_label_font_size=10, 
axis_title_font_size=12, axis_title_padding=8, legend_orient="bottom", legend_symbol_type="stroke", legend_title_orient="left", legend_title_font_size=14, label_font_size=12 ).configure_header(title=None, labelPadding=3, labelFontSize=13) for_website(plot, "Daily Count", "country-level rate of positive cases") plot norm_df = norm_df.dropna(subset=["RATE_7_day_avg"]) norm_df = norm_df.loc[~((norm_df["source"] == "JHU CSSE") & (norm_df["RATE_7_day_avg"] < 0))] # Revision 2021-06-28: # - remove Singapore, Italy and Spain # - show data time points until Sep. 29th, 2020 norm_df = norm_df.loc[~norm_df["country"].isin(["Singapore", "Italy"])].copy() norm_df = norm_df.loc[norm_df["category"] != "4CE: Severe"].copy() norm_fce_df = norm_fce_df.loc[~norm_fce_df["country"].isin(["Singapore", "Italy"])].copy() ## Change category since we are now using differen data sources if isHospitalizationData: pub_data_category_str = 'Hospital admission rate' norm_df.loc[norm_df.source == "JHU CSSE", 'source'] = pub_data_category_str norm_fce_df.loc[norm_fce_df.source == "JHU CSSE", 'source'] = pub_data_category_str else: pub_data_category_str = "JHU CSSE" ### min_date = norm_df["date"].min() max_date = datetime.date(year=2020, month=9, day=29) date_domain = [alt.DateTime(year=min_date.year, month=min_date.month, date=min_date.day), alt.DateTime(year=max_date.year, month=max_date.month, date=max_date.day)] date_brush = alt.selection(type='interval', encodings=['x']) y_selection = alt.selection(type='interval', encodings=["y"], bind="scales", init={"y": rate_domain}) tooltip = [ alt.Tooltip("source", title="Data source"), alt.Tooltip("country", title="Country"), alt.Tooltip("count", title="Daily Cases"), alt.Tooltip("adjusted_count", title="Adjusted Daily Cases"), alt.Tooltip("RATE_7_day_avg", title="Daily Case Rate, 7-day Average", format=".2f"), alt.Tooltip("date", title="Date"), alt.Tooltip("num_hosps", title="Number of 4CE hospitals"), alt.Tooltip("ci_below", title="95% CI upper bound", format=".2f"), alt.Tooltip("ci_above", title="95% CI lower bound", format=".2f") ] chart = alt.Chart(norm_df) if isHospitalizationData: yTitle = "Hospital admission rate, 7-day average" else: yTitle = "Adjusted daily case rate, 7-day average" line = alt.Chart(norm_df).mark_line(opacity=0.7, size=3).encode( x=alt.X("date:T", title=None, axis=alt.Axis(labelBound=True), scale=alt.Scale(domain=date_domain, padding=5)), y=alt.Y("RATE_7_day_avg:Q", axis=alt.Axis(title=yTitle), scale=alt.Scale(zero=False, nice=False, domain=rate_domain, padding=5)), strokeDash=alt.StrokeDash("source:N", scale=alt.Scale(domain=[pub_data_category_str, "4CE"], range=[[3,3], [0,0]]), legend=None), #, legend=alt.Legend(title="Data Source", symbolStrokeWidth=4)), color=alt.Color("source:N", legend=alt.Legend(title=None, symbolStrokeWidth=6)), # scale=alt.Scale(range=['#E79F00', '#57B4E9', '#D45E00'])) tooltip=tooltip ).properties(width=country_width, height=200) errorband = line.transform_filter(alt.datum["source"] == "4CE").mark_errorband().encode( x=alt.X(f"date:T", title=None, axis=alt.Axis(labelBound=True)), y=alt.Y(f"sum(ci_below):Q", title=""), y2=alt.Y2(f"sum(ci_above):Q", title=""), tooltip=tooltip ) top = ( alt.layer(line, errorband) .facet( column=alt.Column("country:N"), bounds="flush" #header=alt.Header(labels=False) ) .transform_filter(date_brush) .add_selection(y_selection) ) # BOTTOM if isHospitalizationData: midYTitle = '4CE admissions' else: midYTitle = '4CE cases' num_patients_bar = alt.Chart(norm_fce_df).mark_bar(size=2).encode( 
x=alt.X("date:T", scale=alt.Scale(domain=date_domain, padding=5), title=None, axis=alt.Axis(labelBound=True, labels=False, ticks=False)), y=alt.Y("count:Q", axis=alt.Axis(title=midYTitle), scale=alt.Scale(domain=patients_domain)), color=alt.value("gray"), tooltip=tooltip ).properties(width=country_width, height=70).facet( column=alt.Column("country:N", header=alt.Header(labels=False)), bounds="flush" ).transform_filter(date_brush).add_selection(date_brush) num_sites_bar = alt.Chart(norm_fce_df).mark_bar(size=2).encode( x=alt.X("date:T", scale=alt.Scale(domain=date_domain, padding=5), title=None, axis=alt.Axis(labelBound=True)), y=alt.Y("num_hosps:Q", axis=alt.Axis(title="4CE hospitals"), scale=alt.Scale(domain=sites_domain)), color=alt.value("gray"), tooltip=tooltip ).properties(width=country_width, height=50).facet( column=alt.Column("country:N", header=alt.Header(labels=False)), bounds="flush" ).transform_filter(date_brush).add_selection(date_brush) if isHospitalizationData: title = "Hospital Admission Rate by Country" else: title = "Country-Level Positive Case Rate, Comparison to JHU CSSE Data" plot = ( alt.vconcat(top, num_patients_bar, num_sites_bar, spacing=5) .properties(title={ "text": title, "subtitle": "", "subtitleColor": "gray", "dx": 14 }) ) plot = apply_theme( plot, axis_label_font_size=10, axis_title_font_size=10, axis_title_padding=8, legend_orient="bottom", legend_symbol_type="stroke", legend_title_orient="left", legend_title_font_size=14, label_font_size=12 ).configure_header(title=None, labelPadding=3, labelFontSize=13) for_website(plot, f"Case Rate Comparison {COHORT}", f"4CE vs {pub_data_category_str}", df=norm_df) plot ```
github_jupyter
# PASTIS matrix from E-fields This notebook calculates PASTIS matrices for the low, mid, and high order modes from single-mode E-fields in the focal plane. It also calculates matrices on the low order wavefront sensor (LOWFS) and out of band wavefront sensor (OBWFS). ``` import os import time from shutil import copy from astropy.io import fits import astropy.units as u import hcipy import numpy as np import pastis.util as util from pastis.config import CONFIG_PASTIS from pastis.e2e_simulators.luvoir_imaging import LuvoirA_APLC ``` ## Initial setup and parameters Set up data paths for input and output ``` root_dir = CONFIG_PASTIS.get('local', 'local_data_path') coronagraph_design = 'small' # user provides overall_dir = util.create_data_path(root_dir, telescope='luvoir_'+coronagraph_design) resDir = os.path.join(overall_dir, 'matrix_numerical') print(resDir) ``` Read from configfile how many modes each DM should be able to do ``` max_LO = CONFIG_PASTIS.getint('dm_objects', 'number_of_low_order_modes') max_MID = CONFIG_PASTIS.getint('dm_objects', 'number_of_mid_order_modes') max_HI = CONFIG_PASTIS.getint('dm_objects', 'number_of_high_order_modes') num_DM_act = CONFIG_PASTIS.getint('dm_objects', 'number_of_continuous_dm_actuators') print(f'max_LO: {max_LO}') print(f'max_MID: {max_MID}') print(f'max_HI: {max_HI}') print(f'num_DM_act: {num_DM_act}') ``` Read some more required parameters from the configfile ``` nb_seg = CONFIG_PASTIS.getint('LUVOIR', 'nb_subapertures') wvln = CONFIG_PASTIS.getfloat('LUVOIR', 'lambda') * 1e-9 # m diam = CONFIG_PASTIS.getfloat('LUVOIR', 'diameter') # m nm_aber = CONFIG_PASTIS.getfloat('LUVOIR', 'calibration_aberration') * 1e-9 # m sampling = CONFIG_PASTIS.getfloat('LUVOIR', 'sampling') print('LUVOIR apodizer design: {}'.format(coronagraph_design)) print() print('Wavelength: {} m'.format(wvln)) print('Telescope diameter: {} m'.format(diam)) print('Number of segments: {}'.format(nb_seg)) print() print('Sampling: {} px per lambda/D'.format(sampling)) # Create necessary directories if they don't exist yet os.makedirs(resDir, exist_ok=True) os.makedirs(os.path.join(resDir, 'OTE_images'), exist_ok=True) os.makedirs(os.path.join(resDir, 'psfs'), exist_ok=True) # Copy configfile to resulting matrix directory util.copy_config(resDir) # Create LUVOIR simulator instance optics_input = os.path.join(util.find_repo_location(), CONFIG_PASTIS.get('LUVOIR', 'optics_path_in_repo')) luvoir = LuvoirA_APLC(optics_input, coronagraph_design, sampling) ``` Generate the deformable mirrors that are required. **This will take quite some time** ``` luvoir.create_global_zernike_mirror(max_LO) luvoir.create_segmented_mirror(max_MID) luvoir.create_ripple_mirror(max_HI) luvoir.create_continuous_deformable_mirror(num_DM_act) # Figur out the total number of "actuators" (= effective modes) for each DM n_LO = luvoir.zernike_mirror.num_actuators n_MID = luvoir.sm.num_actuators n_HI = luvoir.ripple_mirror.num_actuators n_DM = luvoir.dm.num_actuators print(f'n_LO: {n_LO}') print(f'n_MID: {n_MID}') print(f'n_HI: {n_HI}') print(f'n_DM: {n_DM}') ``` Define some parameters that are needed for the subsampled LOWFS output. 
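Before reading those parameters in, it helps to picture what the downsampling does: the wavefront-sensor pupil is block-averaged by a factor `z_pup_downsample` before the E-fields are stored, which shrinks each spatial dimension by that factor. The toy example below is a plain-NumPy stand-in for that operation (the real code further down uses `hcipy.field.subsample_field` with `statistic='mean'`); the 16x16 array and factor of 4 are arbitrary illustration values.
```
import numpy as np

factor = 4
pupil = np.arange(16 * 16, dtype=float).reshape(16, 16)  # toy "pupil" values
# Block-average by the factor, analogous to subsample_field(..., statistic='mean')
subsampled = pupil.reshape(16 // factor, factor, 16 // factor, factor).mean(axis=(1, 3))
print(pupil.shape, '->', subsampled.shape)  # (16, 16) -> (4, 4)
```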
``` z_pup_downsample = CONFIG_PASTIS.getfloat('numerical', 'z_pup_downsample') N_pup_z = int(luvoir.pupil_grid.shape[0] / z_pup_downsample) grid_zernike = hcipy.field.make_pupil_grid(N_pup_z, diameter=luvoir.diam) ``` ## Flatten all DMs and create unaberrated reference PSF ``` LO_modes = np.zeros(n_LO) MID_modes = np.zeros(n_MID) HI_modes = np.zeros(n_HI) DM_modes = np.zeros(n_DM) luvoir.zernike_mirror.actuators = LO_modes luvoir.sm.actuators = MID_modes luvoir.ripple_mirror.actuators = HI_modes luvoir.dm.actuators = DM_modes # Calculate the unaberrated coro and direct PSFs in INTENSITY unaberrated_coro_psf, ref = luvoir.calc_psf(ref=True, display_intermediate=False) # Define the normalization factor for contrast units norm = np.max(ref) # Calculate the unaberrated coro and direct PSFs in E-FIELDS nonaberrated_coro_psf, ref, efield = luvoir.calc_psf(ref=True, display_intermediate=False, return_intermediate='efield') Efield_ref = nonaberrated_coro_psf.electric_field ``` ## Generate LOW-order PASTIS matrix from E-fields ``` print('Generating the E-fields for LOW modes in science plane') print(f'number of LO modes: {n_LO}') # Create lists that will hold measured fields print(f'Calibration aberration used: {nm_aber} m') start_time = time.time() focus_fieldS = [] focus_fieldS_Re = [] focus_fieldS_Im = [] for pp in range(0, n_LO): print(f'Working on mode {pp}/{n_LO}') # Apply calibration aberration to used mode LO_modes = np.zeros(n_LO) LO_modes[pp] = nm_aber / 2 luvoir.zernike_mirror.actuators = LO_modes # Calculate coronagraphic E-field and add to lists aberrated_coro_psf, inter = luvoir.calc_psf(display_intermediate=False, return_intermediate='efield') focus_field1 = aberrated_coro_psf focus_fieldS.append(focus_field1) focus_fieldS_Re.append(focus_field1.real) focus_fieldS_Im.append(focus_field1.imag) # Construct the PASTIS matrix from the E-fields mat_fast = np.zeros([n_LO, n_LO]) # create empty matrix for i in range(0, n_LO): for j in range(0, n_LO): test = np.real((focus_fieldS[i].electric_field - Efield_ref) * np.conj(focus_fieldS[j].electric_field - Efield_ref)) dh_test = (test / norm) * luvoir.dh_mask contrast = np.mean(dh_test[np.where(luvoir.dh_mask != 0)]) mat_fast[i, j] = contrast # Normalize by the calibration aberration matrix_pastis = np.copy(mat_fast) matrix_pastis /= np.square(nm_aber * 1e9) ``` Save results out to disk ``` filename_matrix = 'PASTISmatrix_num_LO_' + str(max_LO) hcipy.write_fits(matrix_pastis, os.path.join(resDir, filename_matrix + '.fits')) print('Matrix saved to:', os.path.join(resDir, filename_matrix + '.fits')) filename_matrix = 'EFIELD_Re_matrix_num_LO_' + str(max_LO) hcipy.write_fits(focus_fieldS_Re, os.path.join(resDir, filename_matrix + '.fits')) print('Efield Real saved to:', os.path.join(resDir, filename_matrix + '.fits')) filename_matrix = 'EFIELD_Im_matrix_num_LO_' + str(max_LO) hcipy.write_fits(focus_fieldS_Im, os.path.join(resDir, filename_matrix + '.fits')) print('Efield Imag saved to:', os.path.join(resDir, filename_matrix + '.fits')) end_time = time.time() print('Runtime for LO modes:', end_time - start_time, 'sec =', (end_time - start_time) / 60, 'min') print('Data saved to {}'.format(resDir)) ``` ## Generate MID-order PASTIS matrix from E-fields ``` print('Generating the Efield for MID modes in science plane') print(f'number of MID modes: {n_MID}') LO_modes = np.zeros(n_LO) MID_modes = np.zeros(n_MID) HI_modes = np.zeros(n_HI) # Create lists that will hold measured fields print(f'Calibration aberration used: {nm_aber} m') start_time = 
time.time() focus_fieldS = [] focus_fieldS_Re = [] focus_fieldS_Im = [] for pp in range(0, n_MID): print(f'Working on mode {pp}/{n_MID}') # Apply calibration aberration to used mode MID_modes = np.zeros(n_MID) MID_modes[pp] = nm_aber / 2 luvoir.sm.actuators = MID_modes # Calculate coronagraphic E-field and add to lists aberrated_coro_psf, inter = luvoir.calc_psf(display_intermediate=False, return_intermediate='efield') focus_field1 = aberrated_coro_psf focus_fieldS.append(focus_field1) focus_fieldS_Re.append(focus_field1.real) focus_fieldS_Im.append(focus_field1.imag) # Construct the PASTIS matrix from the E-fields mat_fast = np.zeros([n_MID, n_MID]) for i in range(0, n_MID): for j in range(0, n_MID): test = np.real((focus_fieldS[i].electric_field - Efield_ref) * np.conj(focus_fieldS[j].electric_field - Efield_ref)) dh_test = (test / norm) * luvoir.dh_mask contrast = np.mean(dh_test[np.where(luvoir.dh_mask != 0)]) mat_fast[i, j] = contrast # Normalize by the calibration aberration matrix_pastis = np.copy(mat_fast) matrix_pastis /= np.square(nm_aber * 1e9) filename_matrix = 'PASTISmatrix_num_MID_' + str(max_MID) hcipy.write_fits(matrix_pastis, os.path.join(resDir, filename_matrix + '.fits')) print('Matrix saved to:', os.path.join(resDir, filename_matrix + '.fits')) filename_matrix = 'EFIELD_Re_matrix_num_MID_' + str(max_MID) hcipy.write_fits(focus_fieldS_Re, os.path.join(resDir, filename_matrix + '.fits')) print('Efield Real saved to:', os.path.join(resDir, filename_matrix + '.fits')) filename_matrix = 'EFIELD_Im_matrix_num_MID_' + str(max_MID) hcipy.write_fits(focus_fieldS_Im, os.path.join(resDir, filename_matrix + '.fits')) print('Efield Imag saved to:', os.path.join(resDir, filename_matrix + '.fits')) end_time = time.time() print('Runtime for MID modes:', end_time - start_time, 'sec =', (end_time - start_time) / 60, 'min') print('Data saved to {}'.format(resDir)) ``` ## Generate HI-order PASTIS matrix from E-fields ``` print('Generating the Efield for HI modes in science plane') print(f'number of HI modes: {n_HI}') LO_modes = np.zeros(n_LO) MID_modes = np.zeros(n_MID) HI_modes = np.zeros(n_HI) # Create lists that will hold measured fields print(f'Calibration aberration used: {nm_aber} m') start_time = time.time() focus_fieldS = [] focus_fieldS_Re = [] focus_fieldS_Im = [] for pp in range(0, n_HI): print(f'Working on mode {pp}/{n_HI}') # Apply calibration aberration to used mode HI_modes = np.zeros(n_HI) HI_modes[pp] = nm_aber / 2 luvoir.ripple_mirror.actuators = HI_modes # Calculate coronagraphic E-field and add to lists aberrated_coro_psf, inter = luvoir.calc_psf(display_intermediate=False, return_intermediate='efield') focus_field1 = aberrated_coro_psf focus_fieldS.append(focus_field1) focus_fieldS_Re.append(focus_field1.real) focus_fieldS_Im.append(focus_field1.imag) # Construct the PASTIS matrix from the E-fields mat_fast = np.zeros([n_HI, n_HI]) for i in range(0, n_HI): for j in range(0, n_HI): test = np.real((focus_fieldS[i].electric_field - Efield_ref) * np.conj(focus_fieldS[j].electric_field - Efield_ref)) dh_test = (test / norm) * luvoir.dh_mask contrast = np.mean(dh_test[np.where(luvoir.dh_mask != 0)]) mat_fast[i, j] = contrast # Normalize by the calibration aberration matrix_pastis = np.copy(mat_fast) matrix_pastis /= np.square(nm_aber * 1e9) filename_matrix = 'PASTISmatrix_num_HI_' + str(max_HI) hcipy.write_fits(matrix_pastis, os.path.join(resDir, filename_matrix + '.fits')) print('Matrix saved to:', os.path.join(resDir, filename_matrix + '.fits')) filename_matrix =
'EFIELD_Re_matrix_num_HI_' + str(max_HI) hcipy.write_fits(focus_fieldS_Re, os.path.join(resDir, filename_matrix + '.fits')) print('Efield Real saved to:', os.path.join(resDir, filename_matrix + '.fits')) filename_matrix = 'EFIELD_Im_matrix_num_HI_' + str(max_HI) hcipy.write_fits(focus_fieldS_Im, os.path.join(resDir, filename_matrix + '.fits')) print('Efield Imag saved to:', os.path.join(resDir, filename_matrix + '.fits')) end_time = time.time() print('Runtime for HI modes:', end_time - start_time, 'sec =', (end_time - start_time) / 60, 'min') print('Data saved to {}'.format(resDir)) ``` ## Generate LOW-order matrix on LOWFS ``` print('Generating the Efield for LOW modes through LOWFS') print('number of LO modes'.format(n_LO)) ``` Flatten DMs ``` LO_modes = np.zeros(n_LO) MID_modes = np.zeros(n_MID) HI_modes = np.zeros(n_HI) luvoir.zernike_mirror.actuators = LO_modes luvoir.sm.actuators = MID_modes luvoir.ripple_mirror.actuators = HI_modes zernike_ref = luvoir.calc_low_order_wfs() ``` Calculate unaberrated reference E-field on Zernike WFS on a subsampled grid. ``` zernike_ref_sub_real = hcipy.field.subsample_field(zernike_ref.real, z_pup_downsample, grid_zernike, statistic='mean') zernike_ref_sub_imag = hcipy.field.subsample_field(zernike_ref.imag, z_pup_downsample, grid_zernike, statistic='mean') Efield_ref = zernike_ref_sub_real + 1j*zernike_ref_sub_imag # Create lists that will hold measured fields print(f'Calibration aberration used: {nm_aber} m') start_time = time.time() focus_fieldS = [] focus_fieldS_Re = [] focus_fieldS_Im = [] for pp in range(0, n_LO): print(f'Working on mode {pp}/{n_LO}') # Apply calibration aberration to used mode LO_modes = np.zeros(n_LO) LO_modes[pp] = nm_aber / 2 luvoir.zernike_mirror.actuators = LO_modes # Calculate E-field on Zernike WFS and add to lists zernike_meas = luvoir.calc_low_order_wfs() zernike_meas_sub_real = hcipy.field.subsample_field(zernike_meas.real, z_pup_downsample, grid_zernike,statistic='mean') zernike_meas_sub_imag = hcipy.field.subsample_field(zernike_meas.imag, z_pup_downsample, grid_zernike,statistic='mean') focus_field1 = zernike_meas_sub_real + 1j * zernike_meas_sub_imag focus_fieldS.append(focus_field1) focus_fieldS_Re.append(focus_field1.real) focus_fieldS_Im.append(focus_field1.imag) filename_matrix = 'EFIELD_LOWFS_Re_matrix_num_LO_' + str(max_LO) hcipy.write_fits(focus_fieldS_Re, os.path.join(resDir, filename_matrix + '.fits')) print('Efield Real saved to:', os.path.join(resDir, filename_matrix + '.fits')) filename_matrix = 'EFIELD_LOWFS_Im_matrix_num_LO_' + str(max_LO) hcipy.write_fits(focus_fieldS_Im, os.path.join(resDir, filename_matrix + '.fits')) print('Efield Imag saved to:', os.path.join(resDir, filename_matrix + '.fits')) end_time = time.time() print('Runtime for LO modes and LOWFS:', end_time - start_time, 'sec =', (end_time - start_time) / 60, 'min') print('Data saved to {}'.format(resDir)) ``` ## Generate MID-order matrix on OBWFS ``` print('Generating the Efield for MID modes to OBWFS') print('number of MID modes'.format(n_MID)) # Flatten DMs LO_modes = np.zeros(n_LO) MID_modes = np.zeros(n_MID) HI_modes = np.zeros(n_HI) luvoir.zernike_mirror.actuators = LO_modes luvoir.sm.actuators = MID_modes luvoir.ripple_mirror.actuators = HI_modes # Calculate unaberrated reference E-field on Zernike WFS on a subsampled grid. 
zernike_ref = luvoir.calc_out_of_band_wfs() zernike_ref_sub_real = hcipy.field.subsample_field(zernike_ref.real, z_pup_downsample, grid_zernike, statistic='mean') zernike_ref_sub_imag = hcipy.field.subsample_field(zernike_ref.imag, z_pup_downsample, grid_zernike, statistic='mean') Efield_ref = zernike_ref_sub_real + 1j*zernike_ref_sub_imag # Create lists that will hold measured fields print(f'Calibration aberration used: {nm_aber} m') start_time = time.time() focus_fieldS = [] focus_fieldS_Re = [] focus_fieldS_Im = [] for pp in range(0, n_MID): print(f'Working on mode {pp}/{n_MID}') # Apply calibration aberration to used mode MID_modes = np.zeros(n_MID) MID_modes[pp] = nm_aber / 2 luvoir.sm.actuators = MID_modes # Calculate E-field on OBWFS and add to lists zernike_meas = luvoir.calc_out_of_band_wfs() zernike_meas_sub_real = hcipy.field.subsample_field(zernike_meas.real, z_pup_downsample, grid_zernike, statistic='mean') zernike_meas_sub_imag = hcipy.field.subsample_field(zernike_meas.imag, z_pup_downsample, grid_zernike, statistic='mean') focus_field1 = zernike_meas_sub_real + 1j * zernike_meas_sub_imag focus_fieldS.append(focus_field1) focus_fieldS_Re.append(focus_field1.real) focus_fieldS_Im.append(focus_field1.imag) filename_matrix = 'EFIELD_OBWFS_Re_matrix_num_MID_' + str(max_MID) hcipy.write_fits(focus_fieldS_Re, os.path.join(resDir, filename_matrix + '.fits')) print('Efield Real saved to:', os.path.join(resDir, filename_matrix + '.fits')) filename_matrix = 'EFIELD_OBWFS_Im_matrix_num_MID_' + str(max_MID) hcipy.write_fits(focus_fieldS_Im, os.path.join(resDir, filename_matrix + '.fits')) print('Efield Imag saved to:', os.path.join(resDir, filename_matrix + '.fits')) end_time = time.time() print('Runtime for MID modes and OBWFS:', end_time - start_time, 'sec =', (end_time - start_time) / 60, 'min') print('Data saved to {}'.format(resDir)) ```
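With all matrices written to disk, a typical next step in the PASTIS formalism is to estimate the mean dark-hole contrast for an arbitrary aberration vector $a$ (in nm) as a quadratic form, $c \approx c_0 + a^T M a$, where $c_0$ is the unaberrated contrast floor and $M$ is one of the matrices computed above. The sketch below only illustrates that usage with the low-order matrix saved earlier; the random aberration vector is a test input rather than a calibrated wavefront, and the exact contrast-floor expression should be checked against the analysis scripts of the PASTIS package.
```
from astropy.io import fits
import numpy as np
import os

# Load the low-order PASTIS matrix saved above (entries are in contrast per nm^2)
matrix_path = os.path.join(resDir, 'PASTISmatrix_num_LO_' + str(max_LO) + '.fits')
pastis_matrix = fits.getdata(matrix_path)

# Unaberrated contrast floor, computed over the dark hole like the matrix entries above
contrast_floor = np.mean((unaberrated_coro_psf / norm)[np.where(luvoir.dh_mask != 0)])

# Random test aberration amplitudes in nm (illustrative only)
aber_nm = np.random.normal(0, 0.1, pastis_matrix.shape[0])
contrast = contrast_floor + aber_nm @ pastis_matrix @ aber_nm
print('Estimated mean dark-hole contrast: {:.2e}'.format(contrast))
```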
github_jupyter
# Image analysis with fMRI 3D images imported with LORIS API This is a tutorial to show how to use Loris' API to download MRI images. It also contains a few examples of how the data can be used to run basic data analysis. This tutorial is also available as a Google colab notebook so you can run it directly from your browser. To access it, click on the button below: <a href="https://colab.research.google.com/github/spell00/Loris/blob/2020-08-06-JupyterCreateImageDataset/docs/notebooks/LORIS-API_Part3-Create_image_dataset.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` # Uncomment and run to install the packages required to run the notebook # !pip3 install tqdm # !pip3 install numpy # !pip3 install nibabel # !pip3 install sklearn # !pip3 install matplotlib # !pip3 install nilearn ``` ## Setup ``` import getpass # For input prompt not to show what is entered import json # Provide convenient functions to handle json objects import re # For regular expression import requests # To handle http requests import nibabel as nib import numpy as np import warnings from tqdm import tqdm_notebook as tqdm # To make a nice progress bar import os import itertools os.chdir('..') warnings.simplefilter('ignore') # Because I am using unverified ssl certificates def prettyPrint(string): print(json.dumps(string, indent=2, sort_keys=True)) import argparse import torch import torch.nn as nn import numpy as np import json from torch.utils.data import DataLoader from tensorboardX import SummaryWriter from fmri.utils.activations import Swish, Mish from fmri.utils.CycleAnnealScheduler import CycleScheduler from fmri.utils.dataset import load_checkpoint, save_checkpoint, MRIDataset from fmri.utils.transform_3d import Normalize, RandomRotation3D, ColorJitter3D, Flip90, Flip180, Flip270, XFlip, YFlip, \ ZFlip from fmri.models.supervised.MLP import MLP from fmri.utils.plot_performance import plot_performance import torchvision from torchvision import transforms from ax.service.managed_loop import optimize import random import nibabel as nib from fmri.utils.utils import validation_spliter import nilearn.plotting as nlplt ``` ## Getting the data The data on https://demo.loris.ca are only for development purposes. Nevertheless, with this in mind, we will use it for demonstration purposes only. In this tutorial, we will download all the T1 and T2 raw images from every project. 
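The download loop further below assumes that `baseurl`, `token`, `projectnames` and `imagesMeta` already exist, but they are not defined in this notebook. The sketch below shows one way they are typically obtained from a LORIS instance; the base URL, API version and response keys follow the usual LORIS API conventions but are assumptions here, so verify them against the API documentation of your own instance before relying on them.
```
import getpass
import requests

baseurl = 'https://demo.loris.ca/api/v0.0.3'  # assumed demo instance and API version

# Request a token with your credentials (prompted so nothing is stored in the notebook)
payload = {
    'username': input('username: '),
    'password': getpass.getpass('password: '),
}
token = requests.post(baseurl + '/login', json=payload).json()['token']
headers = {'Authorization': 'Bearer %s' % token}

# List the projects, then fetch the image metadata for each of them
projectnames = list(requests.get(baseurl + '/projects', headers=headers).json()['Projects'].keys())
imagesMeta = {
    project: requests.get(baseurl + '/projects/' + project + '/images', headers=headers).json()
    for project in projectnames
}
```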
``` images_path = 'D:\\workbench\\projects\\AutoTKV_MouseMRI-master\\AllTrainingImages\\images\\' targets_path = 'D:\\workbench\\projects\\AutoTKV_MouseMRI-master\\AllTrainingImages\\targets\\' all_set = MRIDataset(images_path, targets_path, transform=None, resize=False) spliter = validation_spliter(all_set, cv=5) valid_set, train_set = spliter.__next__() train_loader = DataLoader(train_set, num_workers=0, shuffle=True, batch_size=1, pin_memory=False, drop_last=True) valid_loader = DataLoader(valid_set, num_workers=0, shuffle=True, batch_size=1, pin_memory=False, drop_last=True) sample_x, sample_target = next(iter(valid_set)) sample_x = sample_x.numpy().squeeze() sample_target = sample_target.numpy().squeeze() np.round(sample.shape) / 2 def np_to_nifti(sample): coords = np.round(sample.shape) / 2 t1_fullimage = nib.Nifti1Image(sample_x, np.eye(4)) return nlplt.plot_anat(t1_fullimage, (128, 128, 10)) np_to_nifti(sample_x) np_to_nifti(sample_target) def _resize_data(data, new_size=(160, 160, 160)): initial_size_x = data.shape[0] initial_size_y = data.shape[1] initial_size_z = data.shape[2] new_size_x = new_size[0] new_size_y = new_size[1] new_size_z = new_size[2] delta_x = initial_size_x / new_size_x delta_y = initial_size_y / new_size_y delta_z = initial_size_z / new_size_z new_data = np.zeros((new_size_x, new_size_y, new_size_z)) for x, y, z in itertools.product(range(new_size_x), range(new_size_y), range(new_size_z)): new_data[x][y][z] = data[int(x * delta_x)][int(y * delta_y)][int(z * delta_z)] return new_data sample_x_14x14x14 = _resize_data(sample_x, (14, 14, 14)) t1_fullimage = nib.Nifti1Image(sample_x_14x14x14, np.eye(4)) nlplt.plot_anat(t1_fullimage, (7, 7, 7)) nlplt.show() training_images_dir = 'D:\workbench\projects\AutoTKV_MouseMRI-master\AllTrainingImages\images' training_targets_dir = 'D:\workbench\projects\AutoTKV_MouseMRI-master\AllTrainingImages\targets' ``` #### Then, we get the information necessary to retrieve all images from all the projects and store them in a dictionnary. ``` # The dictionary to store the images images_dict = { "raw": { 't1': [], 't2': [] }, "32x32x32": { 't1': [], 't2': [] }, "128x128x128": { 't1': [], 't2': [] } } # Progress bar for downloads pbar = tqdm(total=sum([len([meta for meta in imagesMeta[p]['Images'] if meta['ScanType'] in ['t1', 't2']]) for p in projectnames])) for project in projectnames: for i, meta in enumerate(imagesMeta[project]['Images']): if(meta['ScanType'] not in ['t1', 't2']): continue r = requests.get(baseurl + meta['Link'], headers = {'Authorization': 'Bearer %s' % token}) page = r.content filename = meta['Link'].split('/')[-1] t = meta['ScanType'] # The images need to be saved first. # Only t1 and t2 images are kept. if (t in ['t1', 't2']): file_ = open(filename, 'wb') else: continue file_.write(page) file_.close() img = nib.load(filename) # The images are not necessary for the rest of this tutorial. os.remove(filename) img = img.get_fdata() # The images are save in the dictionary if(meta['ScanType'] == 't1'): images_dict["raw"]["t1"] += [img] if(meta['ScanType'] == 't2'): images_dict["raw"]["t2"] += [img] pbar.update(1) ``` ## Preprocessing In this section, we'll explore a few preprocessing methods that might make the models learned perform better. ### Resize images In this tutorial, T1 and T2 images are compared. They are of similar sizes (160x256x224 and 160x256x256 for T1 and T2, respectively), but they need to be exactly the same size for any subsequent analysis. 
In machine learning, it is common practice to reduce large images before training a model. Large images have the advantage of containing more information, but this comes with a tradeoff known as the curse of dimensionality. A high dimensionality can make it much easier to achieve good performance on the training set, but the models trained overfit more easily to the training data and perform poorly on the validation and test data. Of course, reducing images too much will also harm the performance of the model trained. There is no rule of thumb or algorithm to find the optimal image size for a specific task, so it is a good idea to try a few different reductions. This tutorial will explore two sizes, both cubes (all sides have the same length): 128x128x128 and 32x32x32. The latter is a drastic reduction, but the 3D images still have 32,768 dimensions (each voxel being a dimension), which is still a lot, yet much more manageable than the 2,097,152 dimensions of the 128x128x128 images. In order to decide which reduction to use, we will observe the data using a Principal Component Analysis (PCA). It will give an idea of whether the data has lost too much information to be used in a classification task. Ultimately, it might be necessary to test both strategies to see if one is better than the other. If both strategies appear equal, Ockham's razor suggests using the images with fewer voxels. In this case, the notion of equality is somewhat arbitrary and might depend on the task to be accomplished. ``` def resize_image(image, new_size=(160, 160, 160)): """ Function to resize an image. Args: image (Numpy array of shape (Length, Width, Depth)): image to transform new_size (3-Tuple) : The new image length, width and depth """ initial_size_x = image.shape[0] initial_size_y = image.shape[1] initial_size_z = image.shape[2] new_size_x = new_size[0] new_size_y = new_size[1] new_size_z = new_size[2] delta_x = initial_size_x / new_size_x delta_y = initial_size_y / new_size_y delta_z = initial_size_z / new_size_z new_image = np.zeros((new_size_x, new_size_y, new_size_z)) for x, y, z in itertools.product(range(new_size_x), range(new_size_y), range(new_size_z)): new_image[x][y][z] = image[int(x * delta_x)][int(y * delta_y)][int(z * delta_z)] return new_image ``` If we wanted to save the resized T1 and T2 images to disk, we would first need to create new directories for them; a minimal sketch follows.
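The folder layout below is purely hypothetical, so adjust the paths to your own setup; the rest of this tutorial keeps the resized arrays in memory, so this step is optional.
```
import os

# Hypothetical output folders for the resized images; adjust to your own layout
for size in ('32x32x32', '128x128x128'):
    for scan_type in ('t1', 't2'):
        os.makedirs(os.path.join('data', 'resized', size, scan_type), exist_ok=True)
```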
#### Resize and normalize all T1 images ``` from sklearn.preprocessing import Normalizer pbar = tqdm(total=len(images_dict['raw']['t1'])) for t1 in images_dict['raw']["t1"]: t1_32 = resize_image(t1, (32, 32, 32)) t1_32 = Normalizer().fit_transform(t1_32.reshape([1, -1])) t1_32 = t1_32.reshape([-1, 32, 32, 32]) images_dict['32x32x32']['t1'] += [t1_32] t1_128 = resize_image(t1, (128, 128, 128)) t1_128 = Normalizer().fit_transform(t1_128.reshape([1, -1])) t1_128 = t1_128.reshape([-1, 128, 128, 128]) images_dict['128x128x128']['t1'] += [t1_128] pbar.update(1) """ We don't need to save the images for this tutorial, but the package nibabel can be used to save the images to disk like this: img = nib.Nifti1Image(image_to_save, np.eye(4)) img.to_filename("/path/to/new_file_name.nii") """ # Make numpy arrays from the lists of numpy arrays images_dict['32x32x32']['t1'] = np.stack(images_dict['32x32x32']['t1']) images_dict['128x128x128']['t1'] = np.stack(images_dict['128x128x128']['t1']) ``` #### Resize and normalize T2 images ``` pbar = tqdm(total=len(images_dict['raw']['t2'])) for t2 in images_dict['raw']["t2"]: t2_32 = resize_image(t2, (32, 32, 32)) t2_32 = Normalizer().fit_transform(t2_32.reshape([1, -1])) t2_32 = t2_32.reshape([-1, 32, 32, 32]) images_dict['32x32x32']['t2'] += [t2_32] t2_128 = resize_image(t2, (128, 128, 128)) t2_128 = Normalizer().fit_transform(t2_128.reshape([1, -1])) t2_128 = t2_128.reshape([-1, 128, 128, 128]) images_dict['128x128x128']['t2'] += [t2_128] pbar.update(1) # Make numpy arrays from the lists of numpy arrays images_dict['32x32x32']['t2'] = np.stack(images_dict['32x32x32']['t2']) images_dict['128x128x128']['t2'] = np.stack(images_dict['128x128x128']['t2']) ``` ### Visualisation with nilearn Visualisation of the raw images and the 2 reductions for T1 and T2 images. #### T1 images ``` # This package is used to plot a section of the 3D images import nilearn.plotting as nlplt print("Original (160x256x224)") t1_fullimage = nib.Nifti1Image(images_dict['raw']['t1'][0], np.eye(4)) nlplt.plot_anat(t1_fullimage, (80, 128, 112)) nlplt.show() print("128x128x128") img_t1_128 = nib.Nifti1Image(resize_image(images_dict['raw']['t1'][0], (128, 128, 128)), np.eye(4)) nlplt.plot_anat(img_t1_128, (64, 64, 64)) nlplt.show() print("32x32x32") img_t1_32 = nib.Nifti1Image(resize_image(images_dict['raw']['t1'][0], (32, 32, 32)), np.eye(4)) nlplt.plot_anat(img_t1_32, (16, 16, 16)) nlplt.show() ``` #### T2 images ``` print("Original (160x256x256)") t2_fullimage = nib.Nifti1Image(images_dict['raw']['t2'][0], np.eye(4)) nlplt.plot_anat(t2_fullimage, (80, 128, 112)) nlplt.show() print("128x128x128") img_t2_128 = nib.Nifti1Image(resize_image(images_dict['raw']['t2'][0], (128, 128, 128)), np.eye(4)) nlplt.plot_anat(img_t2_128, (64, 64, 64)) nlplt.show() print("32x32x32") img_t2_32 = nib.Nifti1Image(resize_image(images_dict['raw']['t2'][0], (32, 32, 32)), np.eye(4)) nlplt.plot_anat(img_t2_32, (16, 16, 16)) nlplt.show() ``` ## Unsupervised learning: Principal Component Analysis Principal Component Analysis (PCA) is a popular method used for dimensioanlity reduction, which is a good first step to vizualise the data to analyse and can give insight for the subsequent steps of the analysis. Dimensionality reduction can also be used to transform the data before using it to train a ML model. 
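The cells below use PCA for visualization only. To illustrate the last point, that the projected data can also feed a classifier directly, here is a self-contained sketch on synthetic arrays standing in for the flattened volumes (so the score itself is meaningless); the real images are used in the visualization and classification cells that follow.
```
import numpy as np
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline

rng = np.random.default_rng(0)
X = rng.normal(size=(60, 32 * 32 * 32))  # stand-in for 60 flattened 32x32x32 volumes
y = np.repeat([0, 1], 30)                # 0 = "T1", 1 = "T2"

# Project to a handful of principal components, then classify the projected data
clf = Pipeline([
    ('pca', PCA(n_components=10)),
    ('logreg', LogisticRegression(max_iter=1000)),
])
clf.fit(X, y)
print('Training accuracy on synthetic data:', clf.score(X, y))
```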
``` # sklearn needs the data to be flattened images_dict['32x32x32']['t1'] = images_dict['32x32x32']['t1'].reshape( [images_dict['32x32x32']['t1'].shape[0], -1] ) images_dict['128x128x128']['t1'] = images_dict['128x128x128']['t1'].reshape( [images_dict['128x128x128']['t1'].shape[0], -1] ) images_dict['32x32x32']['t2'] = images_dict['32x32x32']['t2'].reshape( [images_dict['32x32x32']['t2'].shape[0], -1] ) images_dict['128x128x128']['t2'] = images_dict['128x128x128']['t2'].reshape( [images_dict['128x128x128']['t2'].shape[0], -1] ) #@title The orginal T1 images have a total of 9175040 voxels. from IPython.display import Markdown as md md("The sizes for the 32x32x32 and 128x128x128 images are \ {} and {}, respectively. They represent {}% and \ {}% of the original size.".format(images_dict['32x32x32']['t1'].shape[1], images_dict['128x128x128']['t1'].shape[1], np.round(images_dict['32x32x32']['t1'].shape[1] / 9175040 * 100, 2), np.round(images_dict['128x128x128']['t1'].shape[1] / 9175040 * 100, 2), ) ) from sklearn.decomposition import PCA import matplotlib.pyplot as plt import matplotlib.patches as mpatches pca32 = PCA(n_components=2) pca32.fit( np.concatenate([ images_dict['32x32x32']['t1'][:30], images_dict['32x32x32']['t2'][:30] ], 0) ) # Some samples (usually ~10-20%) are used as validation data that will not # be used to train the model. t1_transform_train = pca32.transform(images_dict['32x32x32']['t1'][:30]) t2_transform_train = pca32.transform(images_dict['32x32x32']['t2'][:30]) t1_transform_valid = pca32.transform(images_dict['32x32x32']['t1'][30:]) t2_transform_valid = pca32.transform(images_dict['32x32x32']['t2'][30:]) plt.figure(figsize=(12,6)) blues = ['b' for _ in range(len(images_dict['32x32x32']['t1'][:30]))] greens = ['g' for _ in range(len(images_dict['32x32x32']['t2'][:30]))] reds = ['r' for _ in range(len(images_dict['32x32x32']['t1'][30:]))] cyans = ['c' for _ in range(len(images_dict['32x32x32']['t2'][30:]))] blue_patch = mpatches.Patch(color='b', label='T1 (train)') green_patch = mpatches.Patch(color='g', label='T2 (train)') red_patch = mpatches.Patch(color='r', label='T1 (valid)') cyan_patch = mpatches.Patch(color='c', label='T2 (valid)') plt.scatter(t1_transform_train[:, 0], t1_transform_train[:, 1], c=blues) plt.scatter(t2_transform_train[:, 0], t2_transform_train[:, 1], c=greens) plt.scatter(t1_transform_valid[:, 0], t1_transform_valid[:, 1], c=reds) plt.scatter(t2_transform_valid[:, 0], t2_transform_valid[:, 1], c=cyans) plt.title('PCA of images resized to 32x32x32') plt.legend() plt.xlabel('Component 1') plt.ylabel('Component 2') plt.legend(handles=[blue_patch, green_patch, red_patch, cyan_patch]) plt.show() plt.close() pca128 = PCA(n_components=2) pca128.fit( np.concatenate([ images_dict['128x128x128']['t1'][:30], images_dict['128x128x128']['t2'][:30] ], 0) ) t1_transform_train = pca128.transform(images_dict['128x128x128']['t1'][:30]) t2_transform_train = pca128.transform(images_dict['128x128x128']['t2'][:30]) t1_transform_valid = pca128.transform(images_dict['128x128x128']['t1'][30:]) t2_transform_valid = pca128.transform(images_dict['128x128x128']['t2'][30:]) plt.figure(figsize=(12,6)) plt.scatter(t1_transform_train[:, 0], t1_transform_train[:, 1], c=blues) plt.scatter(t2_transform_train[:, 0], t2_transform_train[:, 1], c=greens) plt.scatter(t1_transform_valid[:, 0], t1_transform_valid[:, 1], c=reds) plt.scatter(t2_transform_valid[:, 0], t2_transform_valid[:, 1], c=cyans) plt.title('PCA of images resized to 128x128x128') plt.xlabel('Component 1') 
plt.ylabel('Component 2')
plt.legend(handles=[blue_patch, green_patch, red_patch, cyan_patch])
plt.show()
plt.close()

#@title The original T1 images have a total of 9175040 voxels.
from IPython.display import Markdown as md
md("For the 128x128x128 voxel images, the first component of the PCA "
   "explains ~{}% of the variance of the images and the second ~{}%. "
   "For the 32x32x32 images, the first component explains {}% of the "
   "variance and the second {}%".format(
       np.round(pca128.explained_variance_ratio_[0] * 100, 2),
       np.round(pca128.explained_variance_ratio_[1] * 100, 2),
       np.round(pca32.explained_variance_ratio_[0] * 100, 2),
       np.round(pca32.explained_variance_ratio_[1] * 100, 2),
   ))
```

## Basic machine learning classification model

The classification task in this tutorial is trivial, so a simple linear model such as a logistic regression classifier should be able to learn how to classify the images perfectly for both image sizes.

```
from sklearn.linear_model import LogisticRegression

print('32x32x32')
lr32 = LogisticRegression()

labels = [0 for x in range(len(images_dict['32x32x32']['t1'][:30]))] + \
    [1 for x in range(len(images_dict['32x32x32']['t2'][:30]))]
labels_valid = [0 for x in range(len(images_dict['32x32x32']['t1'][30:]))] + \
    [1 for x in range(len(images_dict['32x32x32']['t2'][30:]))]

lr32.fit(
    np.concatenate([
        images_dict['32x32x32']['t1'][:30],
        images_dict['32x32x32']['t2'][:30]
    ], 0),
    labels
)

preds_t1_train = lr32.predict(images_dict['32x32x32']['t1'][:30])
preds_t2_train = lr32.predict(images_dict['32x32x32']['t2'][:30])
preds_t1_valid = lr32.predict(images_dict['32x32x32']['t1'][30:])
preds_t2_valid = lr32.predict(images_dict['32x32x32']['t2'][30:])

# Labels: T1 images are 0s and T2 images are 1s
labels_t1_train = [0 for _ in preds_t1_train]
labels_t1_valid = [0 for _ in preds_t1_valid]
labels_t2_train = [1 for _ in preds_t2_train]
labels_t2_valid = [1 for _ in preds_t2_valid]

accuracy = sum([1 if pred == target else 0 for (pred, target) in zip(
    np.concatenate((preds_t1_train, preds_t2_train)),
    np.concatenate((labels_t1_train, labels_t2_train)))]
) / len(labels)
accuracy_valid = sum([1 if pred == target else 0 for (pred, target) in zip(
    np.concatenate((preds_t1_valid, preds_t2_valid)),
    np.concatenate((labels_t1_valid, labels_t2_valid)))]
) / len(labels_valid)

print('Train Accuracy: ', accuracy)
print('Valid Accuracy: ', accuracy_valid)

print('128x128x128')
lr128 = LogisticRegression()

labels = [0 for x in range(len(images_dict['128x128x128']['t1'][:30]))] + \
    [1 for x in range(len(images_dict['128x128x128']['t2'][:30]))]
labels_valid = [0 for x in range(len(images_dict['128x128x128']['t1'][30:]))] + \
    [1 for x in range(len(images_dict['128x128x128']['t2'][30:]))]

lr128.fit(
    np.concatenate([
        images_dict['128x128x128']['t1'][:30],
        images_dict['128x128x128']['t2'][:30]
    ], 0),
    labels
)

preds_t1_train = lr128.predict(images_dict['128x128x128']['t1'][:30])
preds_t2_train = lr128.predict(images_dict['128x128x128']['t2'][:30])
preds_t1_valid = lr128.predict(images_dict['128x128x128']['t1'][30:])
preds_t2_valid = lr128.predict(images_dict['128x128x128']['t2'][30:])

accuracy = sum([1 if pred == target else 0 for (pred, target) in zip(
    np.concatenate((preds_t1_train, preds_t2_train)),
    np.concatenate((labels_t1_train, labels_t2_train)))]
) / len(labels)
accuracy_valid = sum([1 if pred == target else 0 for (pred, target) in zip(
    np.concatenate((preds_t1_valid, preds_t2_valid)),
    np.concatenate((labels_t1_valid, labels_t2_valid)))]
) / len(labels_valid)

print('Train Accuracy: ', accuracy)
print('Valid Accuracy: ', accuracy_valid)
```
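The accuracies above are computed with hand-rolled list comprehensions. A more compact alternative, sketched here under the assumption that the same `images_dict` arrays and 30-sample training split are still in memory, is scikit-learn's `accuracy_score`. The helper name `evaluate_split` is an illustrative addition, not part of the original tutorial.

```
from sklearn.metrics import accuracy_score

def evaluate_split(model, t1, t2, n_train=30):
    # Report train/validation accuracy for a fitted T1 (label 0) vs T2 (label 1) classifier
    x_train = np.concatenate([t1[:n_train], t2[:n_train]], 0)
    y_train = [0] * len(t1[:n_train]) + [1] * len(t2[:n_train])
    x_valid = np.concatenate([t1[n_train:], t2[n_train:]], 0)
    y_valid = [0] * len(t1[n_train:]) + [1] * len(t2[n_train:])
    return (accuracy_score(y_train, model.predict(x_train)),
            accuracy_score(y_valid, model.predict(x_valid)))

# Example usage with the 32x32x32 model fitted above
train_acc, valid_acc = evaluate_split(lr32,
                                      images_dict['32x32x32']['t1'],
                                      images_dict['32x32x32']['t2'])
print('Train Accuracy: ', train_acc)
print('Valid Accuracy: ', valid_acc)
```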
github_jupyter
# Checking stimuli for balance This notebook helps to ensure that the generated stimuli are roughly balanced between positive and negative trials. ``` import os import numpy as np from PIL import Image import pandas as pd import json import pymongo as pm from glob import glob from IPython.display import clear_output import ast import itertools import random import h5py from tqdm import tqdm import matplotlib.pyplot as plt #display all columns pd.set_option('display.max_columns', None) def list_files(paths, ext='mp4'): """Pass list of folders if there are stimuli in multiple folders. Make sure that the containing folder is informative, as the rest of the path is ignored in naming. Also returns filenames as uploaded to S3""" if type(paths) is not list: paths = [paths] results = [] names = [] for path in paths: results += [y for x in os.walk(path) for y in glob(os.path.join(x[0], '*.%s' % ext))] names += [os.path.basename(os.path.dirname(y))+'_'+os.path.split(y)[1].split('.')[0] for x in os.walk(path) for y in glob(os.path.join(x[0], '*.%s' % ext))] # hdf5s = [r.split("_img.")[0]+".hdf5" for r in results] hdf5s = [r.split("_img.")[0] for r in results] return results,names,hdf5s local_stem = 'XXX' #CHANGE THIS ⚡️ dirnames = [d.split('/')[-1] for d in glob(local_stem+'/*')] data_dirs = [local_stem + d for d in dirnames] stimulus_extension = "hdf5" #what's the file extension for the stims? Provide without dot ## get a list of paths to each one full_stim_paths,filenames, full_hdf5_paths = list_files(data_dirs,stimulus_extension) full_map_paths, mapnames, _ = list_files(data_dirs, ext = 'png') #generate filenames and stimpaths for target/zone map print('We have {} stimuli to evaluate.'.format(len(full_stim_paths))) stim_IDs = [name.split('.')[0] for name in filenames] set_names= ['_'.join(s.split('_')[:-2]) for s in stim_IDs] ## convert to pandas dataframe M = pd.DataFrame([stim_IDs,set_names]).transpose() M.columns = ['stim_ID','set_name'] # if needed, add code to add additional columns # Add trial labels to the metadata using the stimulus metadata.json target_hit_zone_labels = dict() for _dir in data_dirs: with open(_dir + '/metadata.json', 'rb') as f: trial_metas = json.load(f) for i,meta in enumerate(trial_metas): stim_name = meta['stimulus_name'] if stim_name == 'None': #recreate stimname from order in metadata stim_name = str(i).zfill(4) stim_name = _dir.split('/')[-1] + '_' + stim_name # if stim_name[-4:] != "_img": stim_name+='_img' #stimnames need to end in "_img" label = meta['does_target_contact_zone'] target_hit_zone_labels[stim_name] = label print("num positive labels: %d" % sum(list(target_hit_zone_labels.values()))) print("num negative labels: %d" % (len(target_hit_zone_labels) - sum(list(target_hit_zone_labels.values())))) print("ratio",sum(list(target_hit_zone_labels.values())) / (len(target_hit_zone_labels) - sum(list(target_hit_zone_labels.values())))) # make new df with all metadata GT = pd.DataFrame([list(target_hit_zone_labels.keys()), list(target_hit_zone_labels.values())]).transpose() GT.columns = ['stim_ID', 'target_hit_zone_label'] # merge with M M = M.merge(GT, on='stim_ID') print("added labels %s" % list(GT.columns[1:])) metadata = {} #holds all the metadata for all stimuli for name,hdf5_path in tqdm(list(zip([f.split('.')[0] for f in filenames],full_hdf5_paths))): #load hdf5 # print("loading",hdf5_path) try: hdf5 = h5py.File(hdf5_path,'r') #get the static part of the HDF5 stim_name = str(np.array(hdf5['static']['stimulus_name'])) metadatum = {} #metadata for the 
current stimulus for key in hdf5['static'].keys(): datum = np.array(hdf5['static'][key]) if datum.shape == (): datum = datum.item() #unwrap non-arrays metadatum[key] = datum #close file hdf5.close() metadata[name] = metadatum except Exception as e: print("Error with",hdf5_path,":",e) continue ``` Insert those metadatas into M: ``` for index in M.index: stim_name = M.at[index,'stim_ID'] for key,value in metadata[stim_name].items(): M.at[index,key] = str(value) #insert every item as string M M['label'] = M['target_hit_zone_label'].astype(int) def get_set_base(name): """Group together stims independent of distractors/room. Assumes a naming scheme with `setname_{tdw,box}_occluderinfo`""" return name.split("_tdw")[0].split("_box")[0] M['set_base'] = M['set_name'].apply(get_set_base) ``` ## Analysis How many stimuli? ``` len(M) ``` How many of trials are positive (1) rather than negative (0)? ``` np.mean(M['label']) ``` How many of trials *per set name* are positive (1) rather than negative (0)? ``` M.groupby('set_name').agg({'stim_ID':['count'],'label':['mean']}) ``` How many of trials *per set base* (independent of room or occluders—assumes a naming scheme with `setname_{tdw,box}_occluderinfo`) are positive (1) rather than negative (0)? ``` M.groupby('set_base').agg({'stim_ID':['count'],'label':['mean']}) ```
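The proportions above are inspected by eye. As an optional extra that is not part of the original notebook, a two-sided binomial test can quantify whether the overall positive rate deviates from an assumed 50/50 target. This sketch assumes a recent SciPy (1.7 or later), which is not otherwise imported here.

```
from scipy.stats import binomtest

# 'label' is 1 when the target contacts the zone, 0 otherwise (built above)
n_positive = int(M['label'].sum())
n_total = len(M)

result = binomtest(n_positive, n_total, p=0.5)
print('observed positive rate:', n_positive / n_total)
print('p-value against a 50/50 split:', result.pvalue)
```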
github_jupyter
<a href="https://colab.research.google.com/github/zahraDehghanian97/Poetry_Generator/blob/master/Word_Poem_generator.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` import tensorflow as tf from tensorflow import keras import numpy as np import pickle from nltk.metrics import accuracy ,ConfusionMatrix from nltk.translate.bleu_score import sentence_bleu seqLength = 20 BATCH_SIZE = 64 BUFFER_SIZE = 100 embedding_dim = 256 rnn_units = 1024 ``` # make data ready ``` filepath = "/content/drive/MyDrive/Colab Notebooks/my_shahname_represntation.txt" with open(filepath, "rb") as f: corpus , test = pickle.load(f) corpus = corpus.replace("\t"," \t ").replace("\n", " \n ") corpusList = [w for w in corpus.split(' ')] corpus_words = [i for i in corpusList if i] map(str.strip, corpus_words) vocab = sorted(set(corpus_words)) print(len(corpus_words)) vocab_size = len(vocab) word2idx = {u: i for i, u in enumerate(vocab)} idx2words = np.array(vocab) word_as_int = np.array([word2idx[c] for c in corpus_words]) def split_input_target(chunk): input_text = chunk[:-1] target_text = chunk[1:] return input_text, target_text # examples_per_epoch = len(corpus_words)//(seqLength + 1) wordDataset = tf.data.Dataset.from_tensor_slices(word_as_int) sequencesOfWords = wordDataset.batch(seqLength + 1, drop_remainder=True) dataset = sequencesOfWords.map(split_input_target) dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True) ``` # LSTM Model ``` def create_model_lstm(vocab_size, embedding_dim, rnn_units, batch_size): model = tf.keras.Sequential() model.add(tf.keras.layers.Embedding(vocab_size, embedding_dim,batch_input_shape=[batch_size, None])) model.add(tf.keras.layers.LSTM(rnn_units,return_sequences=True,stateful=True,recurrent_initializer='glorot_uniform')) model.add(tf.keras.layers.LSTM(rnn_units,return_sequences=True,stateful=True,recurrent_initializer='glorot_uniform')) model.add(tf.keras.layers.Dense(vocab_size)) return model lstm_model = create_model_lstm(vocab_size = len(vocab), embedding_dim=embedding_dim, rnn_units=rnn_units, batch_size=BATCH_SIZE) lstm_model.compile(optimizer='adam', loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) history = lstm_model.fit(dataset, epochs=50) main_lstm_model = create_model_lstm(vocab_size = len(vocab), embedding_dim=embedding_dim, rnn_units=rnn_units, batch_size=1) main_lstm_model.set_weights(lstm_model.get_weights()) # main_lstm_model = tf.keras.models.load_model('/content/drive/MyDrive/Colab Notebooks/word_lstm.h5') main_lstm_model.summary() def generate_text(model, start_string): num_generate = 200 start_string_list =[] for w in start_string.split(' '): if w in word2idx : start_string_list.append(w) input_eval = [word2idx[s] for s in start_string_list] input_eval = tf.expand_dims(input_eval, 0) text_generated = [] model.reset_states() for i in range(num_generate): predictions = model(input_eval) predictions = tf.squeeze(predictions, 0) predicted_id = tf.random.categorical(predictions, num_samples=1)[-1,0].numpy() input_eval = tf.expand_dims([predicted_id], 0) text_generated.append(idx2words[predicted_id]) return (start_string + ' '.join(text_generated)) print(generate_text(main_lstm_model, start_string=u"چنین گفت رستم به اسفندیار")) main_lstm_model.save("/content/drive/MyDrive/Colab Notebooks/word_lstm.h5") ``` # Test ``` BLEU_scores = [] accuracy_scores = [] poem = test[0] start = poem[:25] generated_poem = generate_text(main_lstm_model, 
start_string=start) BLEU_scores.append(sentence_bleu(poem, generated_poem)) len_min = min(len(poem),len(generated_poem)) accuracy_scores.append(accuracy(poem[:len_min], generated_poem[:len_min])) print("-----------------------") print("start sentence : ",start) print(generated_poem) print("BLEU score = ",BLEU_scores[-1]) print("Accuracy score = ",accuracy_scores[-1]) print("Confusion matrix =") print(ConfusionMatrix(poem[:len_min], generated_poem[:len_min])) counter = 0 for poem in test : counter+=1 start = poem[:25] generated_poem = generate_text(main_lstm_model, start_string=start) BLEU_scores.append(sentence_bleu(poem, generated_poem)) len_min = min(len(poem),len(generated_poem)) accuracy_scores.append(accuracy(poem[:len_min], generated_poem[:len_min])) print("-----------------------") print("sentence number : ",counter) print("BLEU score = ",BLEU_scores[-1]) print("Accuracy score = ",accuracy_scores[-1]) print("<<------------final report----------->>") print("number of test set = ",len(test)) print("mean BLEU score = ",np.mean(BLEU_scores)) print("mean Accuracy score = ",np.mean(accuracy_scores)) ```
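One knob that often improves generated poems is a sampling temperature. The sketch below is an addition (the function name and the `temperature` parameter are not in the original notebook); it reuses `word2idx`, `idx2words` and the trained `main_lstm_model`, and simply divides the logits by a temperature before `tf.random.categorical` samples the next word. Values below 1.0 make the output more conservative, values above 1.0 make it more varied.

```
def generate_text_with_temperature(model, start_string, temperature=1.0, num_generate=200):
    # Same loop as generate_text, with temperature-scaled logits before sampling
    start_words = [w for w in start_string.split(' ') if w in word2idx]
    input_eval = tf.expand_dims([word2idx[w] for w in start_words], 0)
    text_generated = []
    model.reset_states()
    for _ in range(num_generate):
        predictions = tf.squeeze(model(input_eval), 0)
        predictions = predictions / temperature
        predicted_id = tf.random.categorical(predictions, num_samples=1)[-1, 0].numpy()
        input_eval = tf.expand_dims([predicted_id], 0)
        text_generated.append(idx2words[predicted_id])
    return start_string + ' ' + ' '.join(text_generated)

print(generate_text_with_temperature(main_lstm_model, u"چنین گفت رستم به اسفندیار", temperature=0.8))
```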
github_jupyter
##### Copyright 2019 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # 畳み込みニューラルネットワーク (Convolutional Neural Networks) <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/tutorials/images/cnn"> <img src="https://www.tensorflow.org/images/tf_logo_32px.png" /> View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/ja/tutorials/images/cnn.ipynb"> <img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/ja/tutorials/images/cnn.ipynb"> <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a> </td> </table> このチュートリアルでは、MNIST の数の分類をするための、シンプルな[畳み込みニューラルネットワーク](https://developers.google.com/machine-learning/glossary/#convolutional_neural_network) (CNN: Convolutional Neural Network) の学習について説明します。このシンプルなネットワークは MNIST テストセットにおいて、99%以上の精度を達成します。このチュートリアルでは、[Keras Sequential API](https://www.tensorflow.org/guide/keras)を使用するため、ほんの数行のコードでモデルの作成と学習を行うことができます。 Note: GPU を使うことで CNN をより早く学習させることができます。もし、このノートブックを Colab で実行しているならば、*編集 -> ノートブックの設定 -> ハードウェアアクセラレータ -> GPU* から無料のGPUを有効にすることができます。 ### TensorFlowのインポート ``` from __future__ import absolute_import, division, print_function, unicode_literals !pip install tensorflow-gpu==2.0.0-beta1 import tensorflow as tf from tensorflow.keras import datasets, layers, models ``` ### MNISTデータセットのダウンロードと準備 ``` (train_images, train_labels), (test_images, test_labels) = datasets.mnist.load_data() train_images = train_images.reshape((60000, 28, 28, 1)) test_images = test_images.reshape((10000, 28, 28, 1)) # ピクセルの値を 0~1 の間に正規化 train_images, test_images = train_images / 255.0, test_images / 255.0 ``` ### 畳み込みの基礎部分の作成 下記の6行のコードは、一般的なパターンで畳み込みの基礎部分を定義しています: [Conv2D](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D) と [MaxPooling2D](https://www.tensorflow.org/api_docs/python/tf/keras/layers/MaxPool2D) レイヤーのスタック。 入力として、CNN はバッチサイズを無視して、shape (image_height, image_width, color_channels) のテンソルをとります。color channels について、MNIST は1つ (画像がグレースケールのため) の color channels がありますが、カラー画像には3つ (R, G, B) があります。この例では、MNIST 画像のフォーマットである shape (28, 28, 1) の入力を処理するように CNN を構成します。これを行うには、引数 `input_shape` を最初のレイヤーに渡します。 ``` model = models.Sequential() model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1))) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.Conv2D(64, (3, 3), activation='relu')) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.Conv2D(64, (3, 3), activation='relu')) ``` ここまでのモデルのアーキテクチャを表示してみましょう。 ``` model.summary() ``` 上記より、すべての Conv2D と MaxPooling2D レイヤーの出力は shape (height, width, channels) の 3D テンソルであることがわかります。width と height の寸法は、ネットワークが深くなるにつれて縮小する傾向があります。各 Conv2D レイヤーの出力チャネルの数は、第一引数 (例: 32 または 64) によって制御されます。通常、width とheight が縮小すると、各 Conv2D レイヤーにさらに出力チャネルを追加する余裕が (計算上) できます。 ### 上に Dense レイヤーを追加 モデルを完成するために、(shape (3, 
3, 64) の) 畳み込みの基礎部分からの最後の出力テンソルを、1つ以上の Dense レイヤーに入れて分類を実行します。現在の出力は 3D テンソルですが、Dense レイヤーは入力としてベクトル (1D) を取ります。まず、3D 出力を 1D に平滑化 (または展開) してから、最上部に1つ以上の Dense レイヤーを追加します。MNIST は 10 個の出力クラスを持ちます。そのため、我々は最後の Dense レイヤーの出力を 10 にし、softmax関数を使用します。 ``` model.add(layers.Flatten()) model.add(layers.Dense(64, activation='relu')) model.add(layers.Dense(10, activation='softmax')) ``` これが私たちのモデルの完全なアーキテクチャです。 ``` model.summary() ``` ご覧のとおり、2 つの Dense レイヤーを通過する前に、(3, 3, 64) の出力は shape (576) のベクターに平滑化されました。 ### モデルのコンパイルと学習 ``` model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.fit(train_images, train_labels, epochs=5) ``` ### モデルの評価 ``` test_loss, test_acc = model.evaluate(test_images, test_labels) print(test_acc) ``` ご覧のとおり、我々のシンプルな CNN は 99% 以上のテスト精度を達成しています。数行のコードにしては悪くありません!違うスタイルでの CNN の書き方 (Keras Subclassing API や GradientTape を使ったもの) については[ここ](https://github.com/tensorflow/docs/blob/master/site/en/tutorials/quickstart/advanced.ipynb)を参照してください。
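A small addition that is not part of the original tutorial: once trained, the model can be saved to disk and reloaded with the standard Keras API, so the evaluation above can be reproduced without retraining (the file name below is arbitrary).

```
# Save the trained CNN (architecture and weights) to an HDF5 file
model.save('mnist_cnn.h5')

# Reload it and check that the test accuracy is unchanged
restored_model = tf.keras.models.load_model('mnist_cnn.h5')
restored_loss, restored_acc = restored_model.evaluate(test_images, test_labels)
print(restored_acc)
```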
github_jupyter
# 第0章 そもそも量子コンピュータとは? 近年、マスコミでも「量子コンピュータ」というワードを耳にすることが多い。「名前だけは聞いたことあるけど、どんなものかはよくわからない…」そんな方のために、この章では、量子コンピュータの概要を説明する。 (なお、「量子」コンピュータの業界では、現在のコンピュータを「古典」コンピュータと呼んでおり、Qunatum Native Dojoでもそれに従う。) ## 量子コンピュータというアイディア 量子コンピュータのアイディア自体は古く、エッセイ「ご冗談でしょう、ファインマンさん」でも有名な物理学者リチャード・ファインマンが、1982年に「自然をシミュレーションしたければ、量子力学の原理でコンピュータを作らなくてはならない」と述べたことに端を発する[1]。そして、1985年にオックスフォード大学の物理学者デイビット・ドイチュによって量子コンピュータが理論的に定式化された[2]。 量子コンピュータと従来の古典コンピュータの最も異なる点は、その情報の表し方である。古典コンピュータの内部では、情報は0か1、どちらか1つの状態を取ることのできる「古典ビット」で表現される。これに対し、量子コンピュータの内部では、情報は0と1の両方の状態を**同時に**取ることのできる「量子ビット」で表現される(詳細はこれからQuantum Native Dojoで学んでいくのでご安心を)。量子コンピュータは、量子ビットを用いて多数の計算を同時に行い、その結果をうまく処理することで、古典コンピュータと比べて飛躍的に高速に計算を行える**場合がある**。 ## どのように役に立つのか さきほど**場合がある**と強調したのは、現実にある全ての問題(タスク)を、量子コンピュータが高速に計算できるわけではないからだ。むしろ、量子コンピュータの方が古典コンピュータより高速なのかどうか未だに分かっていない問題の方が圧倒的に多い。それでも、一部の問題については量子コンピュータの方が**現状の**古典コンピュータのアルゴリズムよりも高速であることが証明されている。その代表例が素因数分解である。 素因数分解は、$30 = 2\times3\times5$ などの簡単な場合なら暗算でも計算できるが、分解すべき整数の桁数が大きくなってくると、最高レベルの速度を持つスーパーコンピュータでさえ、その計算には年単位・あるいは宇宙の寿命ほどの時間が必要になる。この「解くのに時間がかかる」ことを利用したのが、現在の暗号通信・情報セキュリティに広く用いられているRSA暗号である。しかし、1995年に米国の数学者ピーター・ショアが、古典よりも**圧倒的に速く**素因数分解を行う量子アルゴリズムを見つけた[3]ことで、一気に量子コンピュータに注目が集まることになった。 素因数分解の他にも、量子コンピュータの方が現状の古典コンピュータよりも高速であると証明されている問題がいくつかある。例えば、整理されていないデータから目的のデータを探し出す探索問題(8-2節`グローバーのアルゴリズム`)や、連立一次方程式の解を求める問題(7-4節`Harrow-Hassidim-Lloydアルゴリズム`)などである。これらの問題は、流体解析・電磁気解析・機械学習など、現代社会を支える様々な科学技術計算に活用できる。さらに、万物は元をたどれば量子力学に従っているため、ファインマンやドイチュが考えたように、究極の自然現象シミュレーターとして量子コンピュータを活用し、物質設計や素材開発を行うことも考案されている。 このように、量子コンピュータによって世の中の全ての計算が高速化される訳でないものの、現代社会に与えるインパクトは計り知れないものがある。 ## 量子誤り訂正 ここまで述べたのは、量子コンピュータの理論的な話である。理論的に速く計算できると証明できても、応用するには実際に計算を行うハードウェアが必要になる。量子コンピュータのハードウェアを製作する研究は世界中で広く行われているが、課題はまだまだ多いのが現状である。もっとも大きな課題の一つが、ノイズである。量子ビットは古典ビットと比べて磁場や温度揺らぎなどの外部ノイズを非常に受けやすく、保持していた情報をすぐに失ってしまう。2019年現在でも、数個〜数十個程度の量子ビットを連結し、より安定に長く動作させる方法を探っているような段階である。 そのようなノイズの問題を克服するために研究されているのが、量子誤り訂正の技術である。量子誤り訂正は、計算途中に生じた量子ビットの誤り(エラー)を検知し、本来の状態に訂正する技術で、理論的には様々な手法が提案されている(なお、我々が普段使っている古典コンピュータにも古典ビットの誤り訂正機能が搭載されている。この機能のおかけで、我々はPC内のデータが突然無くなることを気にせずに暮らせるのである)。しかし、量子誤り訂正は単なる量子ビットの作製・動作実現よりもはるかに技術的難易度が高く、誤り訂正機能を持った量子ビットを作るには少なくともあと10年は必要であると言われている。 そして、前項であげた量子コンピュータの様々な応用を実用的な規模で実行するには、誤り訂正機能を持った量子ビットが1000個単位で必要となることから、**真の量子コンピュータの現実的応用は数十年先になる**と考えられている。 ## NISQ (Noisy Intermidiate-Scale Quantum) デバイスの時代 では我々が量子コンピュータの恩恵を受けるには、あと何十年も待たないといけないのだろうか。「そんなことで手をこまねいているわけにはいかない!」ということで、科学者たちは様々な方法で量子コンピュータの有用性を示そうと模索している。その中で現在最も注目されており、世界中で研究が進められているのが**Noisy Intermediate-Scale Quantum (NISQ)** デバイスという量子コンピュータである。 NISQデバイスの定義は「ノイズを含む(誤り訂正機能を持たない)、中規模(〜数百qubit)な量子デバイス」であり、これであれば数年以内の実用化が可能であると考えられている。NISQデバイスは、誤り訂正機能がないので限られた量子アルゴリズムしか実行できないものの、量子化学計算や機械学習といった領域で、現在の古典コンピュータを凌駕する性能を発揮すると予想されている(第4章・第5章・第6章参照)。 NISQデバイスまで含めれば、量子コンピュータが応用され始めるのはそう遠くない未来なのである。このQuantum Native Dojoを通して、そうした量子コンピュータ応用の最先端に触れるための知識を皆様に身につけていただければ幸いである。 ## 参考文献 1. “Simulating physics with computers”, R. P. Feybmann, International Journal of Theoretical Physics **21**, 467 [(pdfリンク)](https://people.eecs.berkeley.edu/~christos/classics/Feynman.pdf) 2. “Quantum theory, the Church-Turing principle and the universal quantum computer” Proceedings of the Royal Society of London A **400**, 97 (1985) [(pdfリンク)](https://people.eecs.berkeley.edu/~christos/classics/Deutsch_quantum_theory.pdf) 3. “Polynomial-Time Algorithms for Prime Factorization and Discrete Logarithms on a Quantum Computer”, IAM J.Sci.Statist.Comput. 
**26** (1997) 1484 [(pdfリンク)](https://arxiv.org/pdf/quant-ph/9508027.pdf) ## 量子コンピュータの基礎から応用までに関するスライド 量子コンピュータの動作原理や応用アプリケーション、政府・企業の動きに関したさらに詳しい解説は、 [Quantum Summit 2019の講演をまとめたスライド](https://speakerdeck.com/qunasys/quantum-summit-2019)をご覧ください。 [![q-summit-top](figs/0/q-summit.png)](https://speakerdeck.com/qunasys/quantum-summit-2019) --- ## コラム:量子ビット・量子ゲート操作を物理的にどう実現するか 実際の量子コンピュータを構成する量子ビットはいったいどのように作られ、量子ゲート操作はどのように実行されているのだろうか。 量子ビットを実現する方法は1995年頃から複数の有望な方式(物理系)が提案されており、超伝導回路方式・イオントラップ方式・光方式などがある。各方式によって、現在実現できている量子ビット数や量子ビットの寿命(コヒーレンス時間)、エラー率等に違いがあり、世界各国で研究が盛んに進められている。 数々の量子ビット実現方式の中で、最も広く知られている方式は超伝導回路を用いた超伝導量子ビットである。これは1999年に当時NECに所属していた中村泰信(現東京大学教授)・蔡兆申(現東京理科大学教授)らによって世界で初めて製作された量子ビットで、超伝導物質を用いたジョセフソン接合と呼ばれる微細な構造を作ることで量子ビットを実現している。量子ゲート操作は、マイクロ波(電磁波の一種)のパルスをターゲットの量子ビットに送ることで実現される。また、量子ビットの測定は測定用の超伝導回路を量子ビットにつけることで行われる。 超伝導回路方式は、GoogleやRigetti conmputingが数十量子ビットの素子の開発を発表するなど、2019年3月現在で最も有望な量子コンピュータ実現方式であると言える。 量子ビットの実現方法について、より深く知りたい場合には以下を参考にされたい: - Qmedia 量子コンピュータを実現するハードウェア - (前編) https://www.qmedia.jp/making-quantum-hardware-1/ - (後編) https://www.qmedia.jp/making-quantum-hardware-2/ - レビュー論文:“Quantum Computing”, T. D. Ladd _et al._, Nature, **464**, 45 (2010). https://arxiv.org/abs/1009.2267 - Nielsen-Chuang 第7章 `Quantum computers: physical realization`
github_jupyter
``` # #colabを使う方はこちらを使用ください。 # !pip install torch==0.4.1 # !pip install torchvision==0.2.1 # !pip install numpy==1.14.6 # !pip install matplotlib==2.1.2 # !pip install pillow==5.0.0 # !pip install opencv-python==3.4.3.18 # !pip install torchtext==0.3.1 import torch import torch.nn as nn import torch.nn.init as init import torch.optim as optim import torch.nn.functional as F #torchtextを使用 from torchtext import data from torchtext import vocab from torchtext import datasets %matplotlib inline import numpy as np from matplotlib import pyplot as plt # データとモデルに.to(device)を指定してgpuの計算資源を使用する。 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') device ``` #文章生成 ## データの読み込み ``` tokenize = lambda x: x.split() # 前処理用の機能のFieldをセットアップ #Field TEXT = data.Field(sequential=True, tokenize=tokenize, lower=True, batch_first=True) # データを取得 # The Penn Treebankデータセット。 train_dataset, val_dataset, test_dataset = datasets.PennTreebank.splits(TEXT) TEXT.build_vocab(train_dataset, vectors=vocab.GloVe(name='6B', dim=300)) #全単語数 vocab_size = len(TEXT.vocab) print(vocab_size) # 単語の件数のtop10 print(TEXT.vocab.freqs.most_common(10)) # 単語 print(TEXT.vocab.itos[:10]) #埋め込みベクトルを取得 word_embeddings = TEXT.vocab.vectors # ハイパーパラメータ embedding_length = 300 hidden_size = 256 batch_size = 32 # BPTTIteratorは言語モデル用のイテレータ作成を行います。 # textとtarget属性を持ちます。 train_iter, val_iter, test_iter = data.BPTTIterator.splits((train_dataset, val_dataset, test_dataset) , batch_size=32, bptt_len=30, repeat=False) print(len(train_iter)) print(len(val_iter)) print(len(test_iter)) for i, batch in enumerate(train_iter): print("データの形状確認") print(batch.text.size()) print(batch.target.size()) #BPTTIteratorがBatch firstになってない件は2018/11/24時点では#462がPull requestsがされています。 print("permuteでバッチを先にする") print(batch.text.permute(1, 0).size()) print(batch.target.permute(1, 0).size()) print("データ目の形状とデータを確認") text = batch.text.permute(1, 0) target = batch.target.permute(1, 0) print(text[1,:].size()) print(target[1,:].size()) print(text[1,:].tolist()) print(target[1,:].tolist()) print("データの単語列を表示") print([TEXT.vocab.itos[data] for data in text[1,:].tolist()]) print([TEXT.vocab.itos[data] for data in target[1,:].tolist()]) break ``` ## ネットワークを定義 ``` class LstmLangModel(nn.Module): def __init__(self, batch_size, hidden_size, vocab_size, embedding_length, weights): super(LstmLangModel, self).__init__() self.batch_size = batch_size self.hidden_size = hidden_size self.vocab_size = vocab_size self.embed = nn.Embedding(vocab_size, embedding_length) self.embed.weight.data.copy_(weights) self.lstm = nn.LSTM(embedding_length, hidden_size, batch_first=True) self.fc = nn.Linear(hidden_size, vocab_size) def forward(self, x, h): x = self.embed(x) output_seq, (h, c) = self.lstm(x, h) # 出力を変形する (batch_size*sequence_length, 隠れ層のユニット数hidden_size) out = output_seq.reshape(output_seq.size(0)*output_seq.size(1), output_seq.size(2)) out = self.fc(out) return out, (h, c) net = LstmLangModel(batch_size, hidden_size, vocab_size, embedding_length, word_embeddings) net = net.to(device) # 損失関数、最適化関数を定義 criterion = nn.CrossEntropyLoss() optim = optim.Adam(filter(lambda p: p.requires_grad, net.parameters())) ``` ## 学習 ``` num_epochs = 200 train_loss_list = [] # Truncated backpropagation # 逆伝播を途中で打ち切る def detach(states): return [state.detach() for state in states] for epoch in range(num_epochs): train_loss = 0 # 初期隠れ状態とセル状態を設定する states = (torch.zeros(1, batch_size, hidden_size).to(device), torch.zeros(1, batch_size, hidden_size).to(device)) #train net.train() for i, batch in 
enumerate(train_iter): text = batch.text.to(device) labels = batch.target.to(device) #LSTMの形状に合わせて入力もバッチを先にする。 text = text.permute(1, 0) labels = labels.permute(1, 0) optim.zero_grad() states = detach(states) outputs, states = net(text, states) loss = criterion(outputs, labels.reshape(-1)) train_loss += loss.item() loss.backward() optim.step() avg_train_loss = train_loss / len(train_iter) print ('Epoch [{}/{}], Loss: {loss:.4f}, Perplexity: {perp:5.2f}' .format(epoch+1, num_epochs, i+1, loss=avg_train_loss, perp=np.exp(avg_train_loss))) train_loss_list.append(avg_train_loss) ``` ## 生成 ``` num_samples = 1000 # サンプリングされる単語の数 # モデルをテストする net.eval() with torch.no_grad(): text = "" # 初期隠れ状態とセル状態を設定する states = (torch.zeros(1, 1, hidden_size).to(device), torch.zeros(1, 1, hidden_size).to(device)) # ランダムに1単語のIDを選択 input = torch.multinomial(torch.ones(vocab_size), num_samples=1).unsqueeze(1).to(device) # print("input word", TEXT.vocab.itos[input]) for i in range(num_samples): # print("input word", TEXT.vocab.itos[input]) output, states = net(input, states) word_id = output.max(1)[1].item() # 次のタイムステップのために単語IDを入力 input.fill_(word_id) # 単語IDから文字を取得 word = TEXT.vocab.itos[word_id] # textに書き込む word = '\n' if word == '<eos>' else word + ' ' text += word # textを表示 print(text) ```
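The generation cell above always takes the most likely next word with `output.max(1)[1]`, which can get stuck in repetitive loops. A hedged alternative sketch, reusing `net`, `TEXT`, `vocab_size`, `hidden_size`, `device` and `F` defined above, samples the next word from the softmax distribution instead.

```
net.eval()
with torch.no_grad():
    states = (torch.zeros(1, 1, hidden_size).to(device),
              torch.zeros(1, 1, hidden_size).to(device))
    # ランダムに1単語のIDを選択 (same random start as above)
    input = torch.multinomial(torch.ones(vocab_size), num_samples=1).unsqueeze(1).to(device)

    sampled_words = []
    for _ in range(200):
        output, states = net(input, states)
        probs = F.softmax(output, dim=1)                          # turn logits into a distribution
        word_id = torch.multinomial(probs, num_samples=1).item()  # sample instead of argmax
        input.fill_(word_id)
        word = TEXT.vocab.itos[word_id]
        sampled_words.append('\n' if word == '<eos>' else word)

    print(' '.join(sampled_words))
```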
github_jupyter
# Top coding, bottom coding and zero coding

## Outliers

An outlier is a data point which is significantly different from the remaining data. “An outlier is an observation which deviates so much from the other observations as to arouse suspicions that it was generated by a different mechanism.” [D. Hawkins. Identification of Outliers, Chapman and Hall, 1980].

Statistics such as the mean and variance are very susceptible to outliers. In addition, **some Machine Learning models are indeed sensitive to outliers and their performance might be impaired by them**. Thus, it is common practice to engineer the features to minimise the impact of outliers on the performance of these algorithms.

### Nature of outliers

- Genuine extremely high or extremely low values
- Introduced due to mechanical error (wrong measurement)
- Introduced by replacing missing values (NA) by a value out of the distribution (as described in previous lectures)

In some cases, the presence of outliers is informative, and therefore they deserve further study. In this course I will tackle the engineering of those values that do not add any particular extra information, and could as well be eliminated.

## How can we pre-process outliers?

- Mean/median imputation or random sampling
- Discretisation
- Discarding the outliers: a process also called trimming
- Top-coding, bottom-coding and zero-coding: also known as winsorization

### Mean/median imputation or random sampling

If we have reasons to believe that the outliers are due to mechanical error or problems during measurement, then they are similar in nature to missing data, and any of the methods discussed for missing data can be applied to replace them. Because the number of outliers is by definition small (otherwise they would not be outliers), it is reasonable to use mean/median imputation to replace them.

### Discretisation

Discretisation is the transformation of continuous variables into discrete variables. It involves assigning the variable values into defined groups. For example, for the variable age, we could group the observations (people) into buckets / groups like: 0-20, 21-40, 41-60, > 61. This grouping of the variable values into ranges is called discretisation. As you can see, any outlier (extremely high) value of age would be included in the > 61 group, therefore minimising its impact. I will discuss the different discretisation methods in more detail in the "Discretisation" section of this course.

### Trimming

Trimming refers to the removal of the extreme values of a sample. In this procedure, the outliers are identified and those observations removed from the sample. On the downside, these observations may contain useful information for other variables included in the dataset. Thus, we may choose not to remove these observations and instead handle the outliers by top / bottom coding, as described below.

## Top-coding, bottom-coding and zero-coding.

**Top-coding**, widely used in econometrics and statistics, means capping the maximum of a distribution at an arbitrarily set value. A top-coded variable is one for which data points whose values are above an upper bound are censored. In practical terms, this means that all values above the upper bound will be arbitrarily set to that upper bound.

Top-coding is common practice in survey data, before it is released to the public. It is used to preserve the anonymity of respondents. For example, high earners may be easily identifiable by their earnings.
Thus, by implementing top-coding, that outlier is capped at a certain maximum value and therefore looks like many other observations, it is not uniquely identifiable any more. Top-coding can be also applied to prevent possibly-erroneous outliers from being published. Bottom-coding is analogous, but on the left side of the distribution. This is, all values below a certain threshold, are capped at that threshold. If the threshold is zero, then it is known as **zero-coding**, e.g. if amounts below zero are reported as zero. Good examples would be the variable "age", or the variable "earnings". It is not possible to have negative age or a negative salary, thus, it is reasonable to cap the lowest values at zero. Any observation with a value under zero must have been introduced by mistake. Top-coding and bottom-coding are indeed used in practice to remove outliers of variables and therefore prevent model over-fitting. For an example in a financial institution, look at my talk in [pydata](https://www.google.co.uk/url?sa=t&rct=j&q=&esrc=s&source=web&cd=2&cad=rja&uact=8&ved=0ahUKEwiEtaG7p6fXAhVI2hoKHWqQBsMQtwIILTAB&url=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DKHGGlozsRtA&usg=AOvVaw13tQ7UEv3w1k_RLsEbB3aB). #### Note Top-coding may affect estimates of the standard errors of the variable, or change the variable distribution, by censoring those values at the far end of the tails. ### Identifying outliers #### Extreme Value Analysis The most basic form of outlier detection is Extreme Value Analysis of 1-dimensional data. The key for this method is to determine the statistical tails of the underlying distribution of the variable, and then finding the values that sit at the very end of the tails. In the typical scenario, the distribution of the variable is Gaussian and thus outliers will lie outside the mean plus or minus 3 times the standard deviation of the variable. If the variable is not normally distributed, a general approach is to calculate the quantiles, and then the interquantile range (IQR), as follows: IQR = 75th quantile - 25th quantile An outlier will sit outside the following upper and lower boundaries: Upper boundary = 75th quantile + (IQR * 1.5) Lower boundary = 25th quantile - (IQR * 1.5) or for extreme cases: Upper boundary = 75th quantile + (IQR * 3) Lower boundary = 25th quantile - (IQR * 3) ======================================================================= Below I will demonstrate top-coding in real-life datasets. We have seen an intuition of how this improves machine learning algorithms in the lecture "Outliers" in the section "Type of problems within variables". ============================================================================= ## Real Life example: ### Predicting Survival on the Titanic: understanding society behaviour and beliefs Perhaps one of the most infamous shipwrecks in history, the Titanic sank after colliding with an iceberg, killing 1502 out of 2224 people on board. Interestingly, by analysing the probability of survival based on few attributes like gender, age, and social status, we can make very accurate predictions on which passengers would survive. Some groups of people were more likely to survive than others, such as women, children, and the upper-class. Therefore, we can learn about the society priorities and privileges at the time. ### Lending Club **Lending Club** is a peer-to-peer Lending company based in the US. They match people looking to invest money with people looking to borrow money. 
When investors invest their money through Lending Club, this money is passed onto borrowers, and when borrowers pay their loans back, the capital plus the interest passes on back to the investors. It is a win for everybody as they can get typically lower loan rates and higher investor returns. If you want to learn more about Lending Club follow this link: https://www.lendingclub.com/ The Lending Club dataset contains complete loan data for all loans issued through the 2007-2015, including the current loan status (Current, Late, Fully Paid, etc.) and latest payment information. Features (aka variables) include credit scores, number of finance inquiries, address including zip codes and state, and collections among others. Collections indicates whether the customer has missed one or more payments and the team is trying to recover their money. The file is a matrix of about 890 thousand observations and 75 variables. More detail on this dataset can be found in Kaggle's website: https://www.kaggle.com/wendykan/lending-club-loan-data ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline from sklearn.model_selection import train_test_split pd.set_option('display.max_columns', None) # to display the total number columns present in the dataset ``` ## Titanic dataset ``` # let's load the titanic dataset data = pd.read_csv('titanic.csv') data.head() ``` ### Top-coding important Top-coding and bottom-coding, as any other feature pre-processing step, should be determined over the training set, and then transferred onto the test set. This means that we should find the upper and lower bounds in the training set only, and use those bands to cap the values in the test set. ``` # divide dataset into train and test set X_train, X_test, y_train, y_test = train_test_split(data, data.Survived, test_size=0.3, random_state=0) X_train.shape, X_test.shape ``` There are 2 numerical variables in this dataset, Fare and Age. So let's go ahead and find out whether there are values that we could consider outliers ### Fare ``` # First let's plot a histogram to get an idea of the distribution fig = X_train.Fare.hist(bins=50) fig.set_title('Fare Paid Distribution') fig.set_xlabel('Fare') fig.set_ylabel('Number of Passengers') sns.kdeplot(X_train.Fare) ``` Because the distribution of Fare is skewed, we should estimate outliers using the quantile method instead of the Gaussian distribution. ``` # visualising outliers using boxplots and whiskers, which provides the quantiles # and inter-quantile range, with the outliers sitting outside the error bars. # All the dots in the plot below are outliers according to the 1.5 IQR rule fig = sns.boxplot(y='Fare', data=X_train) fig.set_xlabel('Fare') fig.set_ylabel('Number of Passengers') ``` The outliers, according to the above plot, lie all at the right side of the distribution. This is, some people paid extremely high prices for their tickets. Therefore, in this variable, only extremely high values will affect the performance of our machine learning models, and we need to do therefore top-coding. Bottom coding in this case it is not necessary. At least not to improve the performance of the machine learning algorithms. 
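Before looking at Fare, it is convenient to wrap the inter-quantile-range rule described earlier into a small helper. This helper (`find_iqr_boundaries`) is an illustrative addition rather than part of the original notebook; it should reproduce the boundaries calculated step by step below.

```
def find_iqr_boundaries(series, distance=3):
    # 75th / 25th quantiles plus or minus distance * IQR
    # (use distance=1.5 for the standard rule, distance=3 for extreme outliers)
    IQR = series.quantile(0.75) - series.quantile(0.25)
    lower_boundary = series.quantile(0.25) - distance * IQR
    upper_boundary = series.quantile(0.75) + distance * IQR
    return lower_boundary, upper_boundary

# Example: boundaries for Fare, computed on the training set only
find_iqr_boundaries(X_train.Fare, distance=3)
```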
``` # let's look at the values of the quantiles so we can calculate the upper and lower boundaries for the outliers X_train.Fare.describe() # top coding: upper boundary for outliers according to interquantile proximity rule IQR = data.Fare.quantile(0.75) - data.Fare.quantile(0.25) Upper_fence = X_train.Fare.quantile(0.75) + (IQR * 3) Upper_fence ``` The upper boundary, above which every value is considered an outlier is a cost of 100 dollars for the Fare. ``` # lets look at the actual number of passengers that paid more than USS 100 print('total passengers: {}'.format(X_train.shape[0])) print('passengers that paid more than 100: {}'.format(X_train[X_train.Fare>100].shape[0])) print('percentage of outliers: {}'.format(X_train[X_train.Fare>100].shape[0]/np.float(X_train.shape[0]))) # top-coding: capping the variable Fare at 100 X_train.loc[X_train.Fare>100, 'Fare'] = 100 X_test.loc[X_test.Fare>100, 'Fare'] = 100 X_train.Fare.max(), X_test.Fare.max() ``` This is all we need to remove outliers from a machine learning perspective. However, note that in the dataset, there are also a few passengers that paid zero for their tickets ``` X_train[X_train.Fare==0].shape X_train[X_train.Fare==0] ``` The majority of them do not have a Cabin assigned, and could therefore have jumped on the boat illegally. Alternatively, there could also be that that information could not be retrieved, so we do not know how much they paid. But we do know that the cheapest ticket was 5 dollars, see below: ``` X_train[X_train.Fare!=0]['Fare'].min() ``` In situations like this, it is best to discuss with the data owner (in business, someone who knows the data well) the nature of the data, and the importance of the variable. If the 0 values in this case mean that the data could not be retrieved properly, and therefore is in nature an NaN, one could choose to replace them by a random sample or mean/median imputation, or to do bottom-coding. If the case of zero corresponds otherwise to people jumping on the boat illegally, one may choose to leave them as zero. ### Age ``` # First let's plot the histogram to get an idea of the distribution fig = X_train.Age.hist(bins=50) fig.set_title('Age Distribution') fig.set_xlabel('Age') fig.set_ylabel('Number of Passengers') sns.kdeplot(X_train.Age) ``` Although it does not look strictly normal, we could assume normality and use the Gaussian approach to find outliers. See below. ``` # now let's plot the boxplots and whiskers fig = sns.boxplot(y='Age', data=X_train) fig.set_xlabel('Age') fig.set_ylabel('Number of Passengers') ``` Again, for this variable the outliers lie only on the right of the distribution. Therefore we only need to introduce top-coding. ``` # and let's get the numbers to calculate the upper boundary X_train.Age.describe() # Assuming normality Upper_boundary = X_train.Age.mean() + 3* X_train.Age.std() Upper_boundary # let's find out whether there are outliers according to the above boundaries # remember that Age has ~ 20% missing values total_passengers = np.float(X_train.shape[0]) print('total passengers: {}'.format(X_train.Age.dropna().shape[0]/total_passengers)) print('passengers older than 73 (Gaussian app): {}'.format(X_train[X_train.Age>73].shape[0]/total_passengers)) X_train.loc[X_train.Age>73, 'Age'] = 73 X_test.loc[X_test.Age>73, 'Age'] = 73 X_train.Age.max(), X_test.Age.max() ``` In the test set, there were no outliers, as the maximum Age value is 70, below the value we used to cap outliers. 
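As a side note (an addition, not part of the original notebook), the same capping can be written more compactly with pandas' `clip`, which is equivalent to the `.loc` assignments used above and is handy when several variables need top-coding.

```
# Equivalent to the .loc based capping used above; the values are already capped, so this is a no-op here
X_train['Age'] = X_train['Age'].clip(upper=73)
X_test['Age'] = X_test['Age'].clip(upper=73)
X_train['Fare'] = X_train['Fare'].clip(upper=100)
X_test['Fare'] = X_test['Fare'].clip(upper=100)

X_train[['Age', 'Fare']].max(), X_test[['Age', 'Fare']].max()
```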
## Loan book from Lending Club ``` # we will examine only the income variable, as this is one that typically shows outliers. # a few people are high earners, and the remaining of the borrowers fall within a normal-ish distribution data = pd.read_csv('loan.csv', usecols=['annual_inc'], nrows=30000) data.head() fig = data.annual_inc.hist(bins=500) fig.set_xlim(0,500000) sns.boxplot(y='annual_inc', data=data) ``` As expected, outliers sit on the right of the distribution. Therefore, we will perform top-coding. ``` data.annual_inc.describe() # because the distribution is not completely normal, I choose to examine outliers with the interquantal # distance IQR = data.annual_inc.quantile(0.75) - data.annual_inc.quantile(0.25) Upper_fence = data.annual_inc.quantile(0.75) + (IQR * 1.5) Upper_fence_ext = data.annual_inc.quantile(0.75) + (IQR * 3) Upper_fence, Upper_fence_ext # let's look at the percentage of high earners within each extreme bucket total_borrowers = np.float(data.shape[0]) print('total borrowers: {}'.format(data.annual_inc.shape[0]/total_borrowers)) print('borrowers than earn > 146k: {}'.format(data[data.annual_inc>146000].shape[0]/total_borrowers)) print('borrowers than earn > 210k: {}'.format(data[data.annual_inc>210000].shape[0]/total_borrowers)) # top-coding data['annual_capped'] = np.where(data.annual_inc>210000, 210000, data.annual_inc) data.describe() ``` We see the effect of capping on the overall distribution of the variable. The standard deviation is smaller, and so is the maximum value. ``` fig = plt.figure(figsize=(12,8)) ax = fig.add_subplot(111) data.annual_inc.plot(kind='kde', ax=ax) data.annual_capped.plot(kind='kde', ax=ax, color = 'red') lines, labels = ax.get_legend_handles_labels() labels = ['Income original', 'Income capped'] ax.legend(lines, labels, loc='best') ax.set_xlim(0,500000) ``` We can observe the effect of top codding on the variable distribution. The maximum value corresponds now to the value we set as a cap. And we observe a peak in that value, that indicates that people that earn more than the cap, are now grouped together under a capped maximum salary. **That is all for this demonstration. I hope you enjoyed the notebook, and see you in the next one.**
github_jupyter
```
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
titanic = pd.read_csv('./titanic.csv')
titanic.head(3)
```

## Summary statistics

### Summarizing numerical data
- .mean()
- .median()
- .min()
- .max()
- .var()
- .std()
- .sum()
- .quantile()

```
titanic['Age'].mean()
titanic['Age'].mode()
titanic.Age.min()
titanic.Age.max()
titanic['Age'].var() #<--Return unbiased variance over requested axis.
titanic['Age'].quantile() #<--Return values at the given quantile over requested axis.
titanic['Age'].std()
titanic['Age'].sum()
```

### summarizing dates

### .agg() method

##### on a single column
```
def pct30(column):
    return column.quantile(0.3)
titanic['Age'].agg(pct30) #<-- applying agg() on a column using a simple function
titanic['Age'].agg(lambda x: x.quantile(.3)) #<-- using a lambda function
```

##### on multiple columns
```
titanic[['Age', 'Fare']].agg(lambda x: x.quantile(0.3))
```

##### multiple summaries
```
def pct30(column):
    return column.quantile(0.3)
def pct40(column):
    return column.quantile(0.4)
titanic['Age'].agg([pct30, pct40])
```

### cumulative statistics
- .cumsum()
- .cummax()
- .cummin()
- .cumprod()

```
pd.DataFrame(titanic['Age'].cumsum()).head(4)
```

## Counting
#### Dropping duplicates
```
titanic.drop_duplicates(subset = "Pclass")
titanic.drop_duplicates(subset = ["Pclass", 'SibSp'])
```

#### .value_counts()
```
pd.DataFrame(titanic['Age'].value_counts())
pd.DataFrame(titanic['Age'].value_counts(sort=True))
pd.DataFrame(titanic['Age'].value_counts(normalize=True))
```

## Group summary statistics

```
titanic[titanic['Sex'] == 'male']['Age'].mean()
titanic[titanic['Sex'] == 'female']['Age'].mean()
titanic.groupby('Sex')['Age'].mean()
titanic.groupby(['Survived', 'Sex'])['Age'].count() # <-- multiple groups
titanic.groupby('Sex')['Age'].agg(['count', 'min', 'max']) # <-- multiple stats
titanic.groupby(['Survived', 'Sex'])[['Age', 'SibSp']].mean()
titanic.groupby(['Survived', 'Sex'])[['Age', 'SibSp']].agg(['count', 'min', 'max'])
```

## Pivot tables
**Signature**: titanic.pivot_table( values=None, index=None, columns=None, aggfunc='mean', fill_value=None, margins=False, dropna=True, margins_name='All', observed=False, )

```
titanic.groupby('Sex')['Age'].mean()
#pivot and implicitly define aggfunc=np.mean
titanic.pivot_table(values = 'Age', index='Sex')
#explicitly define statistics i.e. np.median
titanic.pivot_table(values= 'Age', index='Sex', aggfunc=np.median)
#multiple statistics
titanic.pivot_table(values='Age', index='Sex', aggfunc=[np.std, np.median])
```

#### pivot on two variables
```
#in groupby
# titanic.groupby(['Survived','Sex'])['Age'].mean().unstack()
#pivot on two variables
titanic.pivot_table(values='Age', index='Sex', columns='Survived')
```

#### filling missing values in pivot table
```
titanic.pivot_table(values='Age', index='Sex', columns='Survived', fill_value=0)
```

#### adding margins to a pivot table
```
titanic.pivot_table(values='Age', index='Sex', columns='Survived', fill_value=0, margins=True)
titanic.pivot_table(values='Age', index='Sex', columns='Survived', fill_value=0, margins=True, margins_name='mean')
```
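As a final sketch (an addition to the notebook, requiring pandas 0.25 or later), named aggregation with `groupby().agg()` produces summaries similar to the pivot tables above, but with explicit names for the output columns.

```
# Named aggregation: output_column=(input_column, aggregation)
titanic.groupby(['Sex', 'Survived']).agg(
    mean_age=('Age', 'mean'),
    max_fare=('Fare', 'max'),
    passengers=('Age', 'size'),   # 'size' counts rows, including missing ages
)
```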
github_jupyter
<a href="https://colab.research.google.com/github/u-masao/YutaroOgawa_pytorch_advanced/blob/master/1_image_classification/1-3_transfer_learning.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # はじめに 『つくりながら学ぶ! PyTorchによる発展ディープラーニング』 のサンプルコードを Google Colaboratory で動作にしました。 オリジナルリポジトリ > https://github.com/YutaroOgawa/pytorch_advanced ``` ! git clone https://github.com/YutaroOgawa/pytorch_advanced.git ! ln -s pytorch_advanced/1_image_classification/data data import os import urllib.request import zipfile data_dir="./data" # ImageNetのclass_indexをダウンロードする # Kerasで用意されているものです # https://github.com/fchollet/deep-learning-models/blob/master/imagenet_utils.py url = "https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json" save_path = os.path.join(data_dir, "imagenet_class_index.json") if not os.path.exists(save_path): urllib.request.urlretrieve(url, save_path) # 1.3節で使用するアリとハチの画像データをダウンロードし解凍します # PyTorchのチュートリアルで用意されているものです # https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html url = "https://download.pytorch.org/tutorial/hymenoptera_data.zip" save_path = os.path.join(data_dir, "hymenoptera_data.zip") if not os.path.exists(save_path): urllib.request.urlretrieve(url, save_path) # ZIPファイルを読み込み zip = zipfile.ZipFile(save_path) zip.extractall(data_dir) # ZIPを解凍 zip.close() # ZIPファイルをクローズ # ZIPファイルを消去 os.remove(save_path) ``` # 1.3「転移学習」で少量データの分類を実現する方法 - 本ファイルでは、学習済みのVGGモデルを使用し、転移学習でアリとハチの画像を分類するモデルを学習します # 学習目標 1. 画像データからDatasetを作成できるようになる 2. DataSetからDataLoaderを作成できるようになる 3. 学習済みモデルの出力層を任意の形に変更できるようになる 4. 出力層の結合パラメータのみを学習させ、転移学習が実装できるようになる ``` # パッケージのimport import glob import os.path as osp import random import numpy as np import json from PIL import Image from tqdm import tqdm import matplotlib.pyplot as plt %matplotlib inline import torch import torch.nn as nn import torch.optim as optim import torch.utils.data as data import torchvision from torchvision import models, transforms # 乱数のシードを設定 torch.manual_seed(1234) np.random.seed(1234) random.seed(1234) ``` # DataSetを作成 ``` # 入力画像の前処理をするクラス # 訓練時と推論時で処理が異なる class ImageTransform(): """ 画像の前処理クラス。訓練時、検証時で異なる動作をする。 画像のサイズをリサイズし、色を標準化する。 訓練時はRandomResizedCropとRandomHorizontalFlipでデータオーギュメンテーションする。 Attributes ---------- resize : int リサイズ先の画像の大きさ。 mean : (R, G, B) 各色チャネルの平均値。 std : (R, G, B) 各色チャネルの標準偏差。 """ def __init__(self, resize, mean, std): self.data_transform = { 'train': transforms.Compose([ transforms.RandomResizedCrop( resize, scale=(0.5, 1.0)), # データオーギュメンテーション transforms.RandomHorizontalFlip(), # データオーギュメンテーション transforms.ToTensor(), # テンソルに変換 transforms.Normalize(mean, std) # 標準化 ]), 'val': transforms.Compose([ transforms.Resize(resize), # リサイズ transforms.CenterCrop(resize), # 画像中央をresize×resizeで切り取り transforms.ToTensor(), # テンソルに変換 transforms.Normalize(mean, std) # 標準化 ]) } def __call__(self, img, phase='train'): """ Parameters ---------- phase : 'train' or 'val' 前処理のモードを指定。 """ return self.data_transform[phase](img) # 訓練時の画像前処理の動作を確認 # 実行するたびに処理結果の画像が変わる # 1. 画像読み込み image_file_path = './data/goldenretriever-3724972_640.jpg' img = Image.open(image_file_path) # [高さ][幅][色RGB] # 2. 元の画像の表示 plt.imshow(img) plt.show() # 3. 
画像の前処理と処理済み画像の表示 size = 224 mean = (0.485, 0.456, 0.406) std = (0.229, 0.224, 0.225) transform = ImageTransform(size, mean, std) img_transformed = transform(img, phase="train") # torch.Size([3, 224, 224]) # (色、高さ、幅)を (高さ、幅、色)に変換し、0-1に値を制限して表示 img_transformed = img_transformed.numpy().transpose((1, 2, 0)) img_transformed = np.clip(img_transformed, 0, 1) plt.imshow(img_transformed) plt.show() # アリとハチの画像へのファイルパスのリストを作成する def make_datapath_list(phase="train"): """ データのパスを格納したリストを作成する。 Parameters ---------- phase : 'train' or 'val' 訓練データか検証データかを指定する Returns ------- path_list : list データへのパスを格納したリスト """ rootpath = "./data/hymenoptera_data/" target_path = osp.join(rootpath+phase+'/**/*.jpg') print(target_path) path_list = [] # ここに格納する # globを利用してサブディレクトリまでファイルパスを取得する for path in glob.glob(target_path): path_list.append(path) return path_list # 実行 train_list = make_datapath_list(phase="train") val_list = make_datapath_list(phase="val") train_list # アリとハチの画像のDatasetを作成する class HymenopteraDataset(data.Dataset): """ アリとハチの画像のDatasetクラス。PyTorchのDatasetクラスを継承。 Attributes ---------- file_list : リスト 画像のパスを格納したリスト transform : object 前処理クラスのインスタンス phase : 'train' or 'test' 学習か訓練かを設定する。 """ def __init__(self, file_list, transform=None, phase='train'): self.file_list = file_list # ファイルパスのリスト self.transform = transform # 前処理クラスのインスタンス self.phase = phase # train or valの指定 def __len__(self): '''画像の枚数を返す''' return len(self.file_list) def __getitem__(self, index): ''' 前処理をした画像のTensor形式のデータとラベルを取得 ''' # index番目の画像をロード img_path = self.file_list[index] img = Image.open(img_path) # [高さ][幅][色RGB] # 画像の前処理を実施 img_transformed = self.transform( img, self.phase) # torch.Size([3, 224, 224]) # 画像のラベルをファイル名から抜き出す if self.phase == "train": label = img_path[30:34] elif self.phase == "val": label = img_path[28:32] # ラベルを数値に変更する if label == "ants": label = 0 elif label == "bees": label = 1 return img_transformed, label # 実行 train_dataset = HymenopteraDataset( file_list=train_list, transform=ImageTransform(size, mean, std), phase='train') val_dataset = HymenopteraDataset( file_list=val_list, transform=ImageTransform(size, mean, std), phase='val') # 動作確認 index = 0 print(train_dataset.__getitem__(index)[0].size()) print(train_dataset.__getitem__(index)[1]) ``` # DataLoaderを作成 ``` # ミニバッチのサイズを指定 batch_size = 32 # DataLoaderを作成 train_dataloader = torch.utils.data.DataLoader( train_dataset, batch_size=batch_size, shuffle=True) val_dataloader = torch.utils.data.DataLoader( val_dataset, batch_size=batch_size, shuffle=False) # 辞書型変数にまとめる dataloaders_dict = {"train": train_dataloader, "val": val_dataloader} # 動作確認 batch_iterator = iter(dataloaders_dict["train"]) # イテレータに変換 inputs, labels = next( batch_iterator) # 1番目の要素を取り出す print(inputs.size()) print(labels) ``` # ネットワークモデルの作成する ``` # 学習済みのVGG-16モデルをロード # VGG-16モデルのインスタンスを生成 use_pretrained = True # 学習済みのパラメータを使用 net = models.vgg16(pretrained=use_pretrained) print(net) # VGG16の最後の出力層の出力ユニットをアリとハチの2つに付け替える net.classifier[6] = nn.Linear(in_features=4096, out_features=2) print(net) # 訓練モードに設定 net.train() print('ネットワーク設定完了:学習済みの重みをロードし、訓練モードに設定しました') ``` # 損失関数を定義 ``` # 損失関数の設定 criterion = nn.CrossEntropyLoss() ``` # 最適化手法を設定 ``` # 転移学習で学習させるパラメータを、変数params_to_updateに格納する params_to_update = [] # 学習させるパラメータ名 update_param_names = ["classifier.6.weight", "classifier.6.bias"] # 学習させるパラメータ以外は勾配計算をなくし、変化しないように設定 for name, param in net.named_parameters(): if name in update_param_names: param.requires_grad = True params_to_update.append(param) print(name) else: param.requires_grad = False # 
params_to_updateの中身を確認 print("-----------") print(params_to_update) # 最適化手法の設定 optimizer = optim.SGD(params=params_to_update, lr=0.001, momentum=0.9) ``` # 学習・検証を実施 ``` # モデルを学習させる関数を作成 def train_model(net, dataloaders_dict, criterion, optimizer, num_epochs): # epochのループ for epoch in range(num_epochs): print('Epoch {}/{}'.format(epoch+1, num_epochs)) print('-------------') # epochごとの学習と検証のループ for phase in ['train', 'val']: if phase == 'train': net.train() # モデルを訓練モードに else: net.eval() # モデルを検証モードに epoch_loss = 0.0 # epochの損失和 epoch_corrects = 0 # epochの正解数 # 未学習時の検証性能を確かめるため、epoch=0の訓練は省略 if (epoch == 0) and (phase == 'train'): continue # データローダーからミニバッチを取り出すループ for inputs, labels in tqdm(dataloaders_dict[phase]): # optimizerを初期化 optimizer.zero_grad() # 順伝搬(forward)計算 with torch.set_grad_enabled(phase == 'train'): outputs = net(inputs) loss = criterion(outputs, labels) # 損失を計算 _, preds = torch.max(outputs, 1) # ラベルを予測 # 訓練時はバックプロパゲーション if phase == 'train': loss.backward() optimizer.step() # イタレーション結果の計算 # lossの合計を更新 epoch_loss += loss.item() * inputs.size(0) # 正解数の合計を更新 epoch_corrects += torch.sum(preds == labels.data) # epochごとのlossと正解率を表示 epoch_loss = epoch_loss / len(dataloaders_dict[phase].dataset) epoch_acc = epoch_corrects.double( ) / len(dataloaders_dict[phase].dataset) print('{} Loss: {:.4f} Acc: {:.4f}'.format( phase, epoch_loss, epoch_acc)) # 学習・検証を実行する num_epochs=2 train_model(net, dataloaders_dict, criterion, optimizer, num_epochs=num_epochs) ``` 以上
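As a small addition that is not part of the original notebook, the fine-tuned weights can be saved and reloaded with the standard PyTorch pattern, so the transfer-learned classifier does not have to be retrained in every session (the file name below is arbitrary).

```
# 学習済みパラメータ(state_dict)を保存 / save the fine-tuned parameters
save_path = './weights_fine_tuning.pth'
torch.save(net.state_dict(), save_path)

# 同じ構造のネットワークを作り直してから重みをロードする / rebuild the architecture, then load the weights
net_reloaded = models.vgg16(pretrained=False)
net_reloaded.classifier[6] = nn.Linear(in_features=4096, out_features=2)
net_reloaded.load_state_dict(torch.load(save_path))
net_reloaded.eval()
```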
github_jupyter
# EnKF Assumption Experiments ### Keiran Suchak Assumptions to test: * Normality of prior * Normality of likelihood * Subsequent normality of posterior This notebook will make use of the `multivariate_normality()` function from the `pingouin` package to perform multidimensional normality tests. ## Imports ``` import matplotlib.pyplot as plt import numpy as np import pandas as pd import pickle import pingouin as pg import seaborn as sns import sys %matplotlib inline sys.path.append('../../../stationsim/') from ensemble_kalman_filter import EnsembleKalmanFilter, EnsembleKalmanFilterType from stationsim_gcs_model import Model np.random.seed(28) ``` ## Functions ``` def tidy_dataframe(df, independent_col: str, dependent_cols: list): output = list() for i, row in df.iterrows(): for col in dependent_cols: d = {independent_col: row[independent_col], 'variable': col, 'value': row[col]} output.append(d) output = pd.DataFrame(output) return output ``` ## Experiment 0: Testing `pg.multivariate_normality()` Create a sample of 5000 $x$-$y$ coordinates from a 2-dimensional normal distribution. ``` mean = [0, 0] cov = [[1, 0], [0, 100]] x, y = np.random.multivariate_normal(mean, cov, 5000).T ``` Plot samples in $x$-$y$ space. ``` plt.figure() plt.plot(x, y, 'x') plt.xlabel('x') plt.ylabel('y') plt.show() ``` Test for normality. ``` X = pd.DataFrame({'x': x, 'y': y}) pg.multivariate_normality(X, alpha=0.05) ``` The test did not find sufficient evidence to reject the null hypothesis, i.e. the data are normally distributed. Let us now consider data drawn from a distribution that is not gaussian. In this case, we draw the $x$-$y$ coordinates from two uniform distributions, \[0.0, 1.0\). ``` x, y = np.random.random_sample((2, 5000)) ``` Plot samples in $x$-$y$ space. ``` plt.figure() plt.plot(x, y, 'x') plt.xlabel('x') plt.ylabel('y') plt.show() ``` Test for normality. ``` X = pd.DataFrame({'x': x, 'y': y}) pg.multivariate_normality(X, alpha=0.05) ``` The test correctly finds sufficient evidence to reject the null hypothesis that the data are normally distributed. We can make a couple of functions to generate normally- and uniformly-distributed samples of arbitrary size and check that the test works for different sample sizes. ``` def normal_sample_2d(N): mean = [0, 0] cov = [[1, 0], [0, 100]] x, y = np.random.multivariate_normal(mean, cov, N).T X = pd.DataFrame({'x': x, 'y': y}) return X def uniform_sample_2d(N): x, y = np.random.random_sample((2, N)) X = pd.DataFrame({'x': x, 'y': y}) return X def test_multidim_normality(X): t = pg.multivariate_normality(X) return t.normal ``` Now we can run through a collection of different sample sizes, each time generating that number of random samples from both normal and uniform distributions and testing whether `pg.multivariate_normality()` found the samples to be normally distributed, or whether sufficient evidence was found to reject the null hypothesis. The sample sizes to be used are `[10, 20, 50, 100, 200, 500, 1000, 2000]`. This selection has been chosen to observe how the test performs on different scales of sample size. In each case, the testing process shall be run $20$ times to account for the randomness of the samples and the fact that the test may incorrectly consider normally distributed data to be non-normal (or vice-versa). 
``` results = list() sample_sizes = [10, 20, 50, 100, 200, 500, 1000, 2000] n_runs = 20 for ss in sample_sizes: for _ in range(n_runs): d = {'sample_size': ss} normal_sample = normal_sample_2d(ss) uniform_sample = uniform_sample_2d(ss) d['gaussian'] = test_multidim_normality(normal_sample) d['non-gaussian'] = test_multidim_normality(uniform_sample) results.append(d) ``` Let's convert these results into a dataframe. ``` results = pd.DataFrame(results) results.head() ``` We can now find the proprtion of cases in each scenario for which the test correctly accepted/rejected the null hypothesis. ``` proportions = list() for ss in sample_sizes: tdf = results.loc[results['sample_size']==ss, ] d = {'sample_size': ss} d['gaussian'] = tdf['gaussian'].sum() / len(tdf['gaussian']) d['non-gaussian'] = tdf['non-gaussian'].sum() / len(tdf['non-gaussian']) proportions.append(d) ``` Again, converting this to a dataframe. ``` proportions = pd.DataFrame(proportions) proportions.head() plt.figure() plt.semilogx(proportions['sample_size'], proportions['gaussian'], label='np.random.multivariate_normal') plt.semilogx(proportions['sample_size'], proportions['non-gaussian'], label='np.random.random_sample') plt.xlabel('Sample size') plt.ylabel('Proportion accepted as gaussian') plt.legend() plt.show() ``` From the above figure, we can see that the test correctly identifies data from `np.random.multivariate_normal()` as gaussian the majority of the time for all sample sizes. We can also see that, for very small sample sizes (i.e. $N<50$), the test typically does not find sufficient evidence to reject the null hypothesis of normality for non-gaussian data. We should, therefore, ensure that our sample sizes are sufficiently large when using the test with data from the Ensemble Kalman Filter. It is also worth considering how this scales with the dimensions of the data - when working with state vectors from the Ensemble Kalman Filter, we consider our sample size to be the filter's ensemble size and twice the model population size to be our number of dimensions. In order to test this, we will need updated version of the functions used to generate samples - the new versions of these functions should generalise such that we can generate $m$-dimensional data samples. ``` def __convert_to_df(Y, m): d = {'var_{0}'.format(i): Y[i] for i in range(m)} X = pd.DataFrame(d) return X def normal_sample_d(N, m): mean = np.zeros(m) cov = np.identity(m) Y = np.random.multivariate_normal(mean, cov, N).T X = __convert_to_df(Y, m) return X def uniform_sample_d(N, m): Y = np.random.random_sample((m, N)) X = __convert_to_df(Y, m) return X ``` Now that we have constructed the functions, let us test them out with $200$ samples from $10$-dimensional distribtutions. We can start by sampling from the multivariate normal distribution: ``` z = normal_sample_d(200, 10) z z.shape ``` And just to check, we can plot the first $2$ dimensions. ``` plt.figure() plt.plot(z['var_0'], z['var_1'], 'x') plt.xlabel('x') plt.ylabel('y') plt.show() ``` Similarly we can sample from the uniform distribution. ``` z = uniform_sample_d(200, 10) z ``` And we can, again, plot the first $2$ dimensions. ``` plt.figure() plt.plot(z['var_0'], z['var_1'], 'x') plt.xlabel('x') plt.ylabel('y') plt.show() ``` Well it looks like these functions work! 
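As a quick optional spot check (a supplementary cell, not part of the original sweep; the exact booleans will vary from run to run because the samples are random), the two generators can be passed straight to the `test_multidim_normality()` helper defined above:

```
# Optional spot check: one normality test on each 10-dimensional sample.
# Reuses normal_sample_d(), uniform_sample_d() and test_multidim_normality()
# defined in the cells above, so run those first.
print(test_multidim_normality(normal_sample_d(200, 10)))   # usually True
print(test_multidim_normality(uniform_sample_d(200, 10)))  # usually False
```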
Now we can make use of them in conjunction with `pg.multivariate_normality()` to see how the normality test responds to different population sizes and ensemble sizes (otherwise referred to as different dimensionalities and sample sizes).

```
results = list()

sample_sizes = [50, 100, 200, 500, 1000]
dimensionalities = list(range(5, 105, 5))
n_runs = 20

for ss in sample_sizes:
    for dimensionality in dimensionalities:
        print(f'Running sample size={ss}, dimensionality={dimensionality}')
        for _ in range(n_runs):
            # Make dictionary for gaussian data
            normal_sample = normal_sample_d(ss, dimensionality)
            test_result = test_multidim_normality(normal_sample)
            d = {'sample_size': ss, 'dimensionality': dimensionality,
                 'kind': 'gaussian', 'result': test_result}
            results.append(d)
            # Make dictionary for non-gaussian data
            uniform_sample = uniform_sample_d(ss, dimensionality)
            test_result = test_multidim_normality(uniform_sample)
            d = {'sample_size': ss, 'dimensionality': dimensionality,
                 'kind': 'non-gaussian', 'result': test_result}
            results.append(d)
```

Convert results to a dataframe.

```
results = pd.DataFrame(results)
results.head()

results['dimensionality'].unique()
```

We now wish to visualise how the number of sample sets accepted as gaussian varies with sample size and dimensionality. To do this, we first create a filtered dataset where we filter out the rows for which the tests returned false.

```
results_f = results.loc[results['result']==True, ['sample_size', 'dimensionality', 'kind']]
results_f.head()

results_f['dimensionality'].unique()
```

We can now create our kde-plot, segregating the data for gaussian and non-gaussian samples.

```
g = sns.FacetGrid(results_f, col='dimensionality', col_wrap=4)
g.map_dataframe(sns.histplot, x='sample_size', hue='kind', kde=True, log_scale=True)
g.set_axis_labels('Sample size', 'Gaussian sample sets')
g.add_legend()
```

## Experiment 1: Pure forecasting

In this experiment, we will not be assimilating any data, i.e. each step of the model ensemble will only consist of the forecast process. Following each forecast step, the ensemble will be tested for normality using the `multivariate_normality()` function from the `pingouin` package. In order to keep the process simple at this stage, a small population of agents will be used, allowing us to use a large ensemble size. This will act as a preliminary experiment towards demonstrating the normality of the ensemble prior distribution.
``` results = list() # Set up filter parameters ensemble_size = 50 pop_size = 2 state_vec_length = 2 * pop_size # Initialise filter with StationSim and params filter_params = {'vanilla_ensemble_size': ensemble_size, 'state_vector_length': state_vec_length, 'mode': EnsembleKalmanFilterType.STATE} model_params = {'pop_total': pop_size, 'station': 'Grand_Central', 'do_print': False} enkf = EnsembleKalmanFilter(Model, filter_params, model_params, filtering=False, benchmarking=True) while enkf.active: enkf.baseline_step() results.append(enkf.vanilla_state_ensemble.copy()) len(results) all_xs = list() all_ys = list() for i in range(10, len(results), len(results)//10): state = results[i] xs = state[::2] ys = state[1::2] all_xs.append(xs) all_ys.append(ys) plt.figure() plt.scatter(xs, ys, s=5, marker='.') plt.xlim((0, 750)) plt.ylim((0, 750)) plt.show() norm_results = list() for state in results: stateT = state.T normality = pg.multivariate_normality(stateT, alpha=1) norm_results.append(normality.normal) sum(norm_results) print(all_xs[0]) print(all_ys[0]) for xs in all_xs: plt.figure() for x in xs: plt.hist(x, alpha=0.5) plt.show() for ys in all_ys: plt.figure() for y in ys: plt.hist(y, alpha=0.5) plt.show() ```
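As a small follow-up summary (not part of the original analysis), the per-step test results collected above can be reduced to a single proportion, giving a headline figure for how often the forecast ensemble was accepted as multivariate normal:

```
# Supplementary summary: fraction of forecast steps at which the ensemble
# passed the multivariate normality test. Reuses norm_results from above.
prop_normal = sum(norm_results) / len(norm_results)
print(f'Ensemble accepted as normal at {prop_normal:.1%} of steps')
```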
github_jupyter
# BHPToolkit Spring 2020 Workshop: EMRISur1dq1e4 Project Tutorial

Some portions of this notebook are also found in the notebook [EMRISur1dq1e4.ipynb](https://github.com/BlackHolePerturbationToolkit/EMRISurrogate/blob/master/EMRISur1dq1e4.ipynb). The waveform model is described in [arXiv:1910.10473](https://arxiv.org/abs/1910.10473).

EMRISur1dq1e4 is a surrogate gravitational-waveform model for non-spinning black hole binary systems with mass-ratios varying from 3 to $10^4$. This surrogate model is trained on waveform data generated by point-particle black hole perturbation theory (ppBHPT), with the total mass rescaling parameter tuned to NR simulations according to the paper's Eq. 4.

Available modes are $\{(2,2), (2,1), (3,3), (3,2), (3,1), (4,4), (4,3), (4,2), (5,5), (5,4), (5,3)\}$. The $m<0$ modes can be deduced from the $m>0$ modes due to symmetry of the system about the orbital plane.

**NOTE**: This notebook requires the file emri_sur_load.py and the EMRISur1dq1e4.h5 data file in the same directory for waveform generation.

# Setup

1. You should now have the programs git and Jupyter as well as the Python packages scipy, h5py, and hashlib
2. Clone the EMRISurrogate code and this tutorial

```shell
>>> git clone https://github.com/BlackHolePerturbationToolkit/EMRISurrogate.git
>>> cd EMRISurrogate # move into the new directory
>>> jupyter notebook BHPToolkit_Spring2020_Tutorial.ipynb # launch this tutorial
```

(no-git fallback plan: download the zip file from https://github.com/BlackHolePerturbationToolkit/EMRISurrogate)

```
# If your setup is correct, then this block of code should execute...
import matplotlib.pyplot as plt
import numpy as np
import hashlib
import h5py
```

# Lesson 1: The data

Surrogate models are numerical models. They require code *and* data.

The EMRI surrogate model's data is [hosted on zenodo](https://zenodo.org/record/3612600#.XsoAP3VKg5k). Our first task is to download the data:

```shell
>>> wget https://zenodo.org/record/3612600/files/EMRISur1dq1e4.h5
```

or click the "download" button and move the file to the same folder as this notebook.

## Make sure your data is at the latest version

The data file has a unique hash, which from zenodo is d145958484738e0c7292e084a66a96fa. If the surrogate model is updated then EMRISur1dq1e4.h5 will be replaced with a newer file. The [EMRI surrogate code](https://github.com/BlackHolePerturbationToolkit/EMRISurrogate/blob/master/emri_sur_load.py#L48) checks that your local h5 file's hash matches the most recent version.

<br>

<img src="hash_check.png" width="940" />

```
# data integrity: let's check that your file's hash is correct
# Current hash: d145958484738e0c7292e084a66a96fa
def md5(fname):
    hash_md5 = hashlib.md5()
    with open(fname, "rb") as f:
        for chunk in iter(lambda: f.read(4096), b""):
            hash_md5.update(chunk)
    return hash_md5.hexdigest()

print( md5('EMRISur1dq1e4.h5') )

# now import some functions to help with evaluating the model
# This step requires that you've cloned the git
# project EMRISurrogate and have the data file listed above
import emri_sur_load as emriSur
```

## Data file's structure

Recall that the full surrogate model is

\begin{align}
h_{\tt S}(t,\theta,\phi;q) & = \sum_{\ell,m} h_{\tt S}^{\ell,m}(t;q) {}_{-2}Y_{\ell m} (\theta,\phi) \\
h_{\tt S}^{\ell,m}(t;q) & = A_{\tt S}^{\ell,m}(t;q) \exp(- \mathrm{i} \phi_{\tt S}^{\ell,m}(t;q)) \,,
\end{align}

and we build models for $A_{\tt S}^{\ell,m}$, $\phi_{\tt S}^{\ell,m}$.
Each $A_{\tt S}^{\ell,m}$ and $\phi_{\tt S}^{\ell,m}$ is represented as an empirical interpolant (EI) with parametric fits at the time nodes. Let's check that our data file matches this structure.

```
# Each mode is a separate surrogate model

# open the file
fp = h5py.File("EMRISur1dq1e4.h5","r")

# inspect the data groups
print("EMRI surrogate data groups: ",fp.keys() )
print("\n\n" )

# grab the 22 mode data group
sur_22 = fp["l2_m2"]

# inspect (2,2)-mode's data groups
print("22 mode surrogate data groups: ",sur_22.keys() )
```

## The surrogate model for $A^{22}(t;q)$

Due to an unfortunate choice made a long time ago, the quantities without any extra postfix denote amplitude data: B_phase is the basis for the phase, while B is the basis for the amplitude. So we have the following data for the 22 mode's amplitude:

### Information about the amplitude's parametric dependence

* 'fit_type_amp': model for the amplitude's dependence with mass ratio
* 'parameterization': how we parameterize the amplitude data
* 'fit_min' / 'fit_max' : smallest / largest values of the parametric domain
* 'n_spline_knots': number of spline knots
* 'spline_knots': location of spline knots
* 'fitparams_amp': spline parameters
* 'degree': degree of the spline used

### Information about the amplitude's temporal dependence

* 'B': basis
* 'eim_indicies': location of empirical interpolation nodes
* 'times': temporal domain on which the amplitude is modeled

The model is given by

\begin{align}
A_{\tt S}^{22}(t;q) = \sum_{i=0}^9 A(T_i^\mathrm{EIM};q) B_i(t) \,,
\end{align}

where $B_i(t)$ is the $i^{th}$ basis function and $T_i^\mathrm{EIM}$ is the $i^{th}$ empirical interpolation time node. We see that $A(T_i^\mathrm{EIM};q)$ are the coefficients defining the amplitude's expansion in terms of the basis. Data found in "Information about the amplitude's parametric dependence" is used to model each coefficient, $A(T_i^\mathrm{EIM};q)$, over the parameter space.

```
# Let's inspect the basis for the 22 mode's amplitude

# Because each basis is a linear combination of amplitudes,
# the basis will look kinda like a waveform amplitude.
# The basis will not look like any standard function (polynomials, sine/cosine, etc...)

### plot the ith cardinal basis function ###
i = 0

times = sur_22['times']
B = sur_22['B']

plt.figure(1)
plt.plot(times,B[:,i])
plt.title('Cardinal basis %i for amplitude'%i)
plt.show()

# Let's inspect the empirical interpolation time nodes for the
# 22 mode's amplitude.

# These nodes are used to define the interpolation problem: the
# surrogate interpolates the waveform training data at these times.

# The time nodes will automatically be placed in regions of higher
# activity.
Unlike uniform, Chebyshev, or other common time nodes # the EI nodes are adaptive to the problem ### plot the location of all emprical interpolation points ### plt.figure(2) eim_indicies = sur_22['eim_indicies'][:] plt.plot(times,B[:,0]) # 0^th basis function plt.plot(times[np.sort(eim_indicies)],np.zeros_like(eim_indicies),'r*') plt.title('EI time node locations') plt.figure(3) eim_indicies = sur_22['eim_indicies'][:] plt.plot(times,B[:,0]) # 0^th basis function plt.plot(times[np.sort(eim_indicies)],np.zeros_like(eim_indicies),'r*') plt.xlim([-500,100]) plt.title('EI time node locations') ``` # Lesson 2: Evaluating the model Input: mass ratio q, and (optionally) modes to generate = [(2,1),(2,2),(3,1),(3,2),(3,3),(4,2),(4,3),(4,4),(5,3),(5,4),(5,5)] Output: dictionary of modes, $h_{\tt S}^{\ell,m}(t;q)$ \begin{align} h_{\tt S}(t,\theta,\phi;q) & = \sum_{\ell,m} h_{\tt S}^{\ell,m}(t;q) {}_{-2}Y_{\ell m} (\theta,\phi) \end{align} Both h and t are in geometric units ``` q = 10.0 # mass ratio time, h = emriSur.generate_surrogate(q) # h is a dictionary with modes as its key # h is a dictionary of modes h_22=h[(2,2)] h_21=h[(2,1)] plt.figure(figsize=(14,4)) plt.plot(time,np.real(h_22),label='{2,2} mode') plt.plot(time,np.real(h_21),label='{2,1} mode') plt.xlabel('t',fontsize=12) plt.ylabel('h(t)',fontsize=12) plt.legend(fontsize=12) #plt.savefig('emri_sur_q_%f.png'%q) plt.show() # waveform in mks units (roughly what the detector would observe) G=6.674*1e-11 MSUN_SI = 1.9885469549614615e+30 PC_SI = 3.085677581491367e+16 C_SI = 299792458.0 # values of M and dL M=80.0*MSUN_SI dL=100.0* PC_SI # scaling of time and h(t) time=time*(G*M/C_SI**3) ht22=np.array(h[(2,2)])*(G*M/C_SI**3)/dL plt.figure(figsize=(14,4)) plt.title('M=80, dL=100.0') plt.plot(time,np.real(ht22),label='{2,2} mode') plt.xlabel('t',fontsize=12) plt.ylabel('h(t)',fontsize=12) plt.legend(fontsize=12) #plt.savefig('emri_sur_q_%f_physical.png'%q) plt.show() ``` # Lesson 3: EMRI surrogate vs output from the Teukolsky solver Our underlying model (denoted $h^{\ell,m}_{\tt S}$ below) is for the output of the Teukolsky solver for point-particle perturbation theory. However, the EMRI surrogate model (denoted $h^{\ell,m}_{\tt S, \alpha}$ below) is calibrated to numerical relativity waveforms at comparable mass binaries with a single parameter $\alpha$: \begin{align*} h^{\ell,m}_{\tt S, \alpha}(t ; q)= {\alpha} h^{\ell,m}_{\tt S}\left( t \alpha;q \right) \,, \end{align*} Which is enacted in emri_sur_load.py file as shown below: <img src="Alpha_scaling_in_code.png" width="840" /> ### To generate output from the Teukolsky solver (no NR calibration) simply set $\alpha = 1$ in lines 110 and 118 (Issue tracker: feature request to allow for this without needing to modify the code) <img src="Alpha_scaling_in_code.png" width="840" /> # Summary 1. To understand how to use the EMRI surrogate model, please consult the Jupyter noteobook found with the [EMRISur1dq1e4 proejct](https://github.com/BlackHolePerturbationToolkit/EMRISurrogate/blob/master/EMRISur1dq1e4.ipynb) in addition to this notebook. 2. Future EMRI surrogate models or improvements will be added here. 3. If you find any issues or have suggestions please open up an issue (or, better yet, a pull request!) <br> <img src="Issue1.png" width="840" />
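As an optional extra cell (not part of the original workshop material), the mode dictionary returned by `generate_surrogate()` in Lesson 2 can be turned into an observer-frame strain by evaluating the mode sum from Lesson 1 at a chosen viewing angle. Only the dominant $(2,2)$ term is kept here and its spin-weighted spherical harmonic ${}_{-2}Y_{22}$ is hard-coded, so this is a rough sketch rather than the full sum; the viewing angles are arbitrary example values.

```
# Rough sketch: keep only the (2,2) term of
# h(t,theta,phi) = sum_{l,m} h^{l,m}(t) * (-2)Y_{lm}(theta,phi),
# using (-2)Y_{22} = sqrt(5/(64 pi)) * (1 + cos(theta))**2 * exp(2j*phi).
q = 10.0
time_geo, h_modes = emriSur.generate_surrogate(q)

theta, phi_angle = np.pi/3, 0.0  # example viewing angles
sY22 = np.sqrt(5.0/(64.0*np.pi)) * (1.0 + np.cos(theta))**2 * np.exp(2j*phi_angle)
h_obs = h_modes[(2, 2)] * sY22   # (2,2) contribution only

plt.figure(figsize=(14, 4))
plt.plot(time_geo, np.real(h_obs), label='(2,2) term only')
plt.xlabel('t', fontsize=12)
plt.ylabel('h(t)', fontsize=12)
plt.legend(fontsize=12)
plt.show()
```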
github_jupyter
# What’s New In Python 3.10 > **See also:** > > * [What’s New In Python 3.10](https://docs.python.org/3.10/whatsnew/3.10.html) ``` import sys assert sys.version_info[:2] >= (3, 10) ``` ## Better error messages ### Syntax Errors * When parsing code that contains unclosed parentheses or brackets the interpreter now includes the location of the unclosed bracket of parentheses instead of displaying `SyntaxError: unexpected EOF`. * `SyntaxError` exceptions raised by the interpreter will now highlight the full error range of the expression that consistutes the syntax error itself, instead of just where the problem is detected. * Specialised messages for `SyntaxError` exceptions have been added e.g. for * missing `:` before blocks * unparenthesised tuples in comprehensions targets * missing commas in collection literals and between expressions * missing `:` and values in dictionary literals * usage of `=` instead of `==` in comparisons * usage of `*` in f-strings ### Indentation Errors * Many `IndentationError` exceptions now have more context. ### Attribute Errors * `AttributeError` will offer suggestions of similar attribute names in the object that the exception was raised from. ### Name Errors * `NameError` will offer suggestions of similar variable names in the function that the exception was raised from. ## Structural Pattern Matching Many functional languages have a `match` expression, for example [Scala](https://www.scala-lang.org/files/archive/spec/2.11/08-pattern-matching.html), [Rust](https://doc.rust-lang.org/reference/expressions/match-expr.html), [F#](https://docs.microsoft.com/en-us/dotnet/fsharp/language-reference/pattern-matching). A `match` statement takes an expression and compares it to successive patterns given as one or more case blocks. This is superficially similar to a switch statement in C, Java or JavaScript, but much more powerful. ### `match` The simplest form compares a subject value against one or more literals: ``` def http_error(status): match status: case 400: return "Bad request" case 401: return "Unauthorized" case 403: return "Forbidden" case 404: return "Not found" case 418: return "I'm a teapot" case _: return "Something else" ``` > **Note:** > > Only in this case `_` acts as a wildcard that never fails and **not** as a variable name. The cases not only check for equality, but rebind variables that match the specified pattern. For example: ``` NOT_FOUND = 404 retcode = 200 match retcode: case NOT_FOUND: print('not found') print(f"Current value of {NOT_FOUND=}") ``` > «If this poorly-designed feature is really added to Python, we lose a principle I’ve always taught students: ‹if you see an undocumented constant, you can always name it without changing the code’s meaning.› The Substitution Principle, learned in algebra? It’ll no longer apply.» – [Brandon Rhodes](https://twitter.com/brandon_rhodes/status/1360226108399099909) > «… the semantics of this can be quite different from switch. The cases don't simply check equality, they rebind variables that match the specified pattern.» – [Jake VanderPlas](https://twitter.com/jakevdp/status/1359870794877132810) ### Symbolic constants Patterns may use named constants. 
These must be dotted names to prevent them from being interpreted as capture variable: ``` from enum import Enum class Color(Enum): RED = 0 GREEN = 1 BLUE = 2 color = Color(2) match color: case color.RED: print("I see red!") case color.GREEN: print("Grass is green") case color.BLUE: print("I'm feeling the blues :(") ``` > «… "case CONSTANT" actually matching everything and assigning to a variable named CONSTANT» – [Armin Ronacher](https://twitter.com/mitsuhiko/status/1359263136994516999) > **See also:** > > * [Structural pattern matching for Python](https://lwn.net/Articles/827179/) > * [PEP 622 – Structural Pattern Matching](https://www.python.org/dev/peps/pep-0622) superseded by > * [PEP 634: Specification](https://www.python.org/dev/peps/pep-0634) > * [PEP 635: Motivation and Rationale](https://www.python.org/dev/peps/pep-0635) > * [PEP 636: Tutorial](https://www.python.org/dev/peps/pep-0636) > * [github.com/gvanrossum/patma/](https://github.com/gvanrossum/patma/) > * [playground-622.ipynb on binder](https://mybinder.org/v2/gh/gvanrossum/patma/master?urlpath=lab/tree/playground-622.ipynb) > * [Tobias Kohn: On the Syntax of Pattern Matching in Python](https://tobiaskohn.ch/index.php/2018/09/18/pattern-matching-syntax-in-python/)
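As a supplementary example going slightly beyond the literal and constant patterns shown above (PEP 636, linked in the references, covers the full feature set), `case` clauses can also destructure sequences, capture values, and carry `if` guards — this is what makes `match` more powerful than a C-style switch:

```
# Supplementary example (not taken from the official "What's New" document):
# sequence patterns, captures and guards.
def locate(point):
    match point:
        case (0, 0):
            return "origin"
        case (0, y):                # capture the y coordinate
            return f"on the y-axis at {y}"
        case (x, 0):                # capture the x coordinate
            return f"on the x-axis at {x}"
        case (x, y) if x == y:      # guard expression
            return f"on the diagonal at {x}"
        case (x, y):
            return f"somewhere else: ({x}, {y})"
        case _:
            return "not a point"

print(locate((0, 3)))  # on the y-axis at 3
print(locate((2, 2)))  # on the diagonal at 2
```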
github_jupyter
##### Copyright 2018 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Eager Execution <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/guide/eager"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/eager.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/eager.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> </table> Note: これらのドキュメントは私たちTensorFlowコミュニティが翻訳したものです。コミュニティによる 翻訳は**ベストエフォート**であるため、この翻訳が正確であることや[英語の公式ドキュメント](https://www.tensorflow.org/?hl=en)の 最新の状態を反映したものであることを保証することはできません。 この翻訳の品質を向上させるためのご意見をお持ちの方は、GitHubリポジトリ[tensorflow/docs](https://github.com/tensorflow/docs)にプルリクエストをお送りください。 コミュニティによる翻訳やレビューに参加していただける方は、 [[email protected] メーリングリスト](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-ja)にご連絡ください。 TensorflowのEager Executionは、計算グラフの作成と評価を同時におこなう命令的なプログラミングを行うための環境です: オペレーションはあとで実行するための計算グラフでなく、具体的な計算結果の値を返します。 この方法を用いることにより、初心者にとってTensorFlowを始めやすくなり、またモデルのデバッグも行いやすくなります。 さらにコードの記述量も削減されます。 このガイドの内容を実行するためには、対話的インタープリタ`python`を起動し、以下のコードサンプルを実行してください。 Eager Executionは研究や実験のための柔軟な機械学習環境として、以下を提供します。 * *直感的なインタフェース*—Pythonのデータ構造を使用して、コードをナチュラルに記述することができます。スモールなモデルとデータに対してすばやく実験を繰り返すことができます。 * *より簡単なデバッグ*—opsを直接呼び出すことで、実行中のモデルを調査したり、変更をテストすることができます。Python標準のデバッグツールを用いて即座にエラーのレポーティングができます。 * *自然な制御フロー*—TensorFlowのグラフ制御フローの代わりにPythonの制御フローを利用するため、動的なモデルのパラメータ変更をシンプルに行うことができます。 Eager ExecutionはTensorflowのほとんどのオペレーションとGPUアクセラレーションをサポートします。 Eager Executionの実行例については、以下を参照してください。 [tensorflow/contrib/eager/python/examples](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/eager/python/examples). 
Note: いくつかのモデルはEager Executionを有効化することでオーバヘッドが増える可能性があります。 パフォーマンス改善を行っていますが、もしも問題を発見したら、バグ報告してベンチマークを共有してください。 ## セットアップと基本的な使い方 Eager Executionをはじめるためには、プログラムやコンソールセッションの最初に、`tf.enable_eager_execution()`を追加してください。 プログラムが呼び出すほかのモジュールにこのオペレーションを追加しないでください。 ``` from __future__ import absolute_import, division, print_function, unicode_literals import tensorflow as tf tf.enable_eager_execution() ``` これでTensorFlowのオペレーションを実行してみましょう。結果はすぐに返されます。 ``` tf.executing_eagerly() x = [[2.]] m = tf.matmul(x, x) print("hello, {}".format(m)) ``` Eager Executionを有効化することで、TensorFlowの挙動は変わります—TensorFlowは即座に式を評価して結果をPythonに返すようになります。 `tf.Tensor` オブジェクトは計算グラフのノードへのシンボリックハンドルの代わりに具体的な値を参照します。 セッションの中で構築して実行する計算グラフが存在しないため、`print()`やデバッガを使って容易に結果を調べることができます。 勾配計算を終了することなくテンソル値を評価、出力、およびチェックすることができます。 Eager Executionは、[NumPy](http://www.numpy.org/)と一緒に使うことができます。 NumPyのオペレーションは、`tf.Tensor`を引数として受け取ることができます。 TensorFlow [math operations](https://www.tensorflow.org/api_guides/python/math_ops) はPythonオブジェクトとNumpy arrayを`tf.Tensor`にコンバートします。 `tf.Tensor.numpy`メソッドはオブジェクトの値をNumPyの`ndarray`形式で返します。 ``` a = tf.constant([[1, 2], [3, 4]]) print(a) # ブロードキャストのサポート b = tf.add(a, 1) print(b) # オペレータのオーバーロードがサポートされている print(a * b) # NumPy valueの使用 import numpy as np c = np.multiply(a, b) print(c) # Tensorからnumpyの値を得る print(a.numpy()) # => [[1 2] # [3 4]] ``` `tf.contrib.eager` モジュールは、Eager ExecutionとGraph Executionの両方の環境で利用可能なシンボルが含まれており、[Graph Execution](#work_with_graphs)方式での記述に便利です: ``` tfe = tf.contrib.eager ``` ## 動的な制御フロー Eager Executionの主要なメリットは、モデルを実行する際にホスト言語のすべての機能性が利用できることです。 たとえば、[fizzbuzz](https://en.wikipedia.org/wiki/Fizz_buzz)が簡単に書けます: ``` def fizzbuzz(max_num): counter = tf.constant(0) max_num = tf.convert_to_tensor(max_num) for num in range(1, max_num.numpy()+1): num = tf.constant(num) if int(num % 3) == 0 and int(num % 5) == 0: print('FizzBuzz') elif int(num % 3) == 0: print('Fizz') elif int(num % 5) == 0: print('Buzz') else: print(num.numpy()) counter += 1 fizzbuzz(15) ``` この関数はテンソル値に依存する条件式を持ち、実行時にこれらの値を表示します。 ## モデルの構築 多くの機械学習モデルはレイヤーを積み重ねによって成り立っています。Eager ExecutionでTensorFlowを使うときは、自分でレイヤーの内容を記述してもいいし、もしくは `tf.keras.layers`パッケージで提供されるレイヤーを使うこともできます。 レイヤーを表現するためには任意のPythonオブジェクトを使用できますが、 TensorFlowには便利な基本クラスとして `tf.keras.layers.Layer`があります。 このクラスを継承した独自のレイヤーを実装してみます: ``` class MySimpleLayer(tf.keras.layers.Layer): def __init__(self, output_units): super(MySimpleLayer, self).__init__() self.output_units = output_units def build(self, input_shape): # buildメソッドは、レイヤーが初めて使われたときに呼ばれます # build()で変数を作成すると、それらのshapeを入力のshapeに依存させることができ、 # ユーザがshapeを完全に指定する必要はありません。 # 既に完全なshapeが決まっている場合は、__init__()の中で変数を作成することもできます。 self.kernel = self.add_variable( "kernel", [input_shape[-1], self.output_units]) def call(self, input): # __call__の代わりにcall()を上書きします。 return tf.matmul(input, self.kernel) ``` `MySimpleLayer`の代わりに、その機能のスーパーセットを持っている`tf.keras.layers.Dense`レイヤーを使用してください (このレイヤーはバイアスを加えることもできるもできます)。 レイヤーをモデルに組み立てるとき、レイヤーの線形スタックである モデルを表すために `tf.keras.Sequential`を使うことができます。この書き方は基本的なモデルを扱いやすいです。 ``` model = tf.keras.Sequential([ tf.keras.layers.Dense(10, input_shape=(784,)), # 入力のshapeを指定する必要がある tf.keras.layers.Dense(10) ]) ``` もしくは、 `tf.keras.Model`を継承してモデルをクラスにまとめます。 これはレイヤー自身であるレイヤーのコンテナで、 `tf.keras.Model`オブジェクトが他の` tf.keras.Model`オブジェクトを含むことを可能にします。 Alternatively, organize models in classes by inheriting from `tf.keras.Model`. This is a container for layers that is a layer itself, allowing `tf.keras.Model` objects to contain other `tf.keras.Model` objects. 
``` class MNISTModel(tf.keras.Model): def __init__(self): super(MNISTModel, self).__init__() self.dense1 = tf.keras.layers.Dense(units=10) self.dense2 = tf.keras.layers.Dense(units=10) def call(self, input): """Run the model.""" result = self.dense1(input) result = self.dense2(result) result = self.dense2(result) # dense2レイヤーを再利用します reuse variables from dense2 layer return result model = MNISTModel() ``` 入力のshapeは最初のレイヤーに初めて入力データを渡すときにセットされるため、 モデル構築時に`tf.keras.Model`クラスに設定する必要はありません。 `tf.keras.layers`クラスは独自のモデル変数を作成し、包含します。このモデル変数は、それを含むレイヤーオブジェクトのライフタイムにひもづきます。レイヤー変数を共有するには、それらのオブジェクトを共有します。 ## Eager Executionにおける学習 ### 勾配の計算 [自動微分](https://en.wikipedia.org/wiki/Automatic_differentiation)はニューラルネットワークの学習で利用される[バックプロパゲーション](https://en.wikipedia.org/wiki/Backpropagation)などの機械学習アルゴリズムの実装を行う上で便利です。 Eager Executionでは、勾配計算をあとで行うためのオペレーションをトレースするために`tf.GradientTape` を利用します。 `tf.GradientTape` はトレースしない場合に最大のパフォーマンスを提供するオプトイン機能です。各呼び出し中に異なるオペレーションが発生する可能性があるため、すべてのforward-passオペレーションは一つの「テープ」に記録されます。勾配を計算するには、テープを逆方向に再生してから破棄します。特定の `tf.GradientTape`は一つのグラデーションしか計算できません。後続の呼び出しは実行時エラーをスローします。 ``` w = tf.Variable([[1.0]]) with tf.GradientTape() as tape: loss = w * w grad = tape.gradient(loss, w) print(grad) # => tf.Tensor([[ 2.]], shape=(1, 1), dtype=float32) ``` ### モデル学習 以下のexampleはMNISTという手書き数字分類を行うマルチレイヤーモデルを作成します。 Eager Execution環境における学習可能なグラフを構築するためのオプティマイザーとレイヤーAPIを提示します。 ``` # mnistデータのを取得し、フォーマットする (mnist_images, mnist_labels), _ = tf.keras.datasets.mnist.load_data() dataset = tf.data.Dataset.from_tensor_slices( (tf.cast(mnist_images[...,tf.newaxis]/255, tf.float32), tf.cast(mnist_labels,tf.int64))) dataset = dataset.shuffle(1000).batch(32) # モデルを構築する mnist_model = tf.keras.Sequential([ tf.keras.layers.Conv2D(16,[3,3], activation='relu'), tf.keras.layers.Conv2D(16,[3,3], activation='relu'), tf.keras.layers.GlobalAveragePooling2D(), tf.keras.layers.Dense(10) ]) ``` 学習を行わずとも、モデルを呼び出して、Eager Executionにより、出力を検査することができます: ``` for images,labels in dataset.take(1): print("Logits: ", mnist_model(images[0:1]).numpy()) ``` kerasモデルは組み込みで学習のループを回すメソッド`fit`がありますが、よりカスタマイズが必要な場合もあるでしょう。 Eager Executionを用いて実装された学習ループのサンプルを以下に示します: ``` optimizer = tf.train.AdamOptimizer() loss_history = [] for (batch, (images, labels)) in enumerate(dataset.take(400)): if batch % 10 == 0: print('.', end='') with tf.GradientTape() as tape: logits = mnist_model(images, training=True) loss_value = tf.losses.sparse_softmax_cross_entropy(labels, logits) loss_history.append(loss_value.numpy()) grads = tape.gradient(loss_value, mnist_model.trainable_variables) optimizer.apply_gradients(zip(grads, mnist_model.trainable_variables), global_step=tf.train.get_or_create_global_step()) import matplotlib.pyplot as plt plt.plot(loss_history) plt.xlabel('Batch #') plt.ylabel('Loss [entropy]') ``` ### 値とオプティマイザ `tf.Variable` オブジェクトは、学習中にアクセスされるミュータブルな`tf.Tensor`値を格納し、自動微分を容易にします。 モデルのパラメータは、変数としてクラスにカプセル化できます。 `tf.GradientTape`と共に` tf.Variable`を使うことでモデルパラメータはよりカプセル化されます。たとえば、上の の自動微分の例は以下のように書き換えることができます: ``` class Model(tf.keras.Model): def __init__(self): super(Model, self).__init__() self.W = tf.Variable(5., name='weight') self.B = tf.Variable(10., name='bias') def call(self, inputs): return inputs * self.W + self.B # 3 * 2 + 2を近似するトイデータセット NUM_EXAMPLES = 2000 training_inputs = tf.random_normal([NUM_EXAMPLES]) noise = tf.random_normal([NUM_EXAMPLES]) training_outputs = training_inputs * 3 + 2 + noise # オプティマイズ対象のloss関数 def loss(model, inputs, targets): error = model(inputs) - targets return tf.reduce_mean(tf.square(error)) def 
grad(model, inputs, targets): with tf.GradientTape() as tape: loss_value = loss(model, inputs, targets) return tape.gradient(loss_value, [model.W, model.B]) # 定義: # 1. モデル # 2. モデルパラメータに関する損失関数の導関数 # 3. 導関数に基づいて変数を更新するストラテジ。 model = Model() optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01) print("Initial loss: {:.3f}".format(loss(model, training_inputs, training_outputs))) # 学習ループ for i in range(300): grads = grad(model, training_inputs, training_outputs) optimizer.apply_gradients(zip(grads, [model.W, model.B]), global_step=tf.train.get_or_create_global_step()) if i % 20 == 0: print("Loss at step {:03d}: {:.3f}".format(i, loss(model, training_inputs, training_outputs))) print("Final loss: {:.3f}".format(loss(model, training_inputs, training_outputs))) print("W = {}, B = {}".format(model.W.numpy(), model.B.numpy())) ``` ## Eager Executionの途中でオブジェクトのステータスを使用する Graph Executionでは、プログラムの状態(変数など)はglobal collectionに格納され、それらの存続期間は `tf.Session`オブジェクトによって管理されます。 対照的に、Eager Executionの間、状態オブジェクトの存続期間は、対応するPythonオブジェクトの存続期間によって決定されます。 ### 変数とオブジェクト Eager Executionの間、変数はオブジェクトへの最後の参照が削除され、その後削除されるまで存続します。 ``` if tf.test.is_gpu_available(): with tf.device("gpu:0"): v = tf.Variable(tf.random_normal([1000, 1000])) v = None # vは既にGPUメモリ上を使用しないようにする ``` ### オブジェクトベースの保存 `tf.train.Checkpoint`はチェックポイントを用いて`tf.Variable`を保存および復元することができます: ``` x = tf.Variable(10.) checkpoint = tf.train.Checkpoint(x=x) x.assign(2.) # 変数に新しい値を割り当てて保存する checkpoint_path = './ckpt/' checkpoint.save('./ckpt/') x.assign(11.) # 保存後に変数の値を変更する # チェックポイントから値を復元する checkpoint.restore(tf.train.latest_checkpoint(checkpoint_path)) print(x) # => 2.0 ``` モデルを保存して読み込むために、 `tf.train.Checkpoint`は隠れ変数なしにオブジェクトの内部状態を保存します。 `モデル`、 `オプティマイザ`、そしてグローバルステップの状態を記録するには、それらを `tf.train.Checkpoint`に渡します。 ``` import os import tempfile model = tf.keras.Sequential([ tf.keras.layers.Conv2D(16,[3,3], activation='relu'), tf.keras.layers.GlobalAveragePooling2D(), tf.keras.layers.Dense(10) ]) optimizer = tf.train.AdamOptimizer(learning_rate=0.001) checkpoint_dir = tempfile.mkdtemp() checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt") root = tf.train.Checkpoint(optimizer=optimizer, model=model, optimizer_step=tf.train.get_or_create_global_step()) root.save(checkpoint_prefix) root.restore(tf.train.latest_checkpoint(checkpoint_dir)) ``` ### オブジェクト指向メトリクス `tfe.metrics`はオブジェクトとして保存されます。新しいデータを呼び出し可能オブジェクトに渡してメトリクスを更新し、 `tfe.metrics.result`メソッドを使って結果を取得します。次に例を示します: ``` m = tfe.metrics.Mean("loss") m(0) m(5) m.result() # => 2.5 m([8, 9]) m.result() # => 5.5 ``` #### サマリとTensorBoard [TensorBoard](../guide/summaries_and_tensorboard.md) はモデルの学習プロセスを理解、デバッグ、最適化するための可視化ツールです。プログラムの実行中に書き込まれるサマリイベントを使用します。 `tf.contrib.summary`はEager ExecutionとGraph Executionの両方の環境と互換性があります。 `tf.contrib.summary.scalar`のようなサマリオペレーションはモデル構築の間に挿入されます。 たとえば、100のグローバルステップごとにサマリを記録するには、次のようにします。 ``` global_step = tf.train.get_or_create_global_step() logdir = "./tb/" writer = tf.contrib.summary.create_file_writer(logdir) writer.set_as_default() for _ in range(10): global_step.assign_add(1) # record_summariesメソッドをincludeする必要がある with tf.contrib.summary.record_summaries_every_n_global_steps(100): # ここにモデルのコードを記述する tf.contrib.summary.scalar('global_step', global_step) !ls tb/ ``` ## 高度な自動分類トピック ### 動的なモデル `tf.GradientTape`は動的モデルでも使うことができます。 以下の[バックトラックライン検索](https://wikipedia.org/wiki/Backtracking_line_search) アルゴリズムの例は、複雑な制御フローにも関わらず 勾配があり、微分可能であることを除いて、通常のNumPyコードのように見えます: ``` def line_search_step(fn, init_x, rate=1.0): with tf.GradientTape() as tape: # 変数は自動的に記録されるが、手動でTensorを監視する 
tape.watch(init_x) value = fn(init_x) grad = tape.gradient(value, init_x) grad_norm = tf.reduce_sum(grad * grad) init_value = value while value > init_value - rate * grad_norm: x = init_x - rate * grad value = fn(x) rate /= 2.0 return x, value ``` ### 勾配計算のための追加機能 `tf.GradientTape`は強力な勾配計算インタフェースですが、 自動微分に利用できる別の[Autograd](https://github.com/HIPS/autograd)スタイルのAPIもあります。 これらの関数はテンソルと勾配関数のみを使って、`tf.variables`を使わずに数式コードを書く場合に便利です: * `tfe.gradients_function`—引数をとり、入力関数パラメータの導関数を計算する関数を返します。 入力パラメータはスカラ値を返さなければなりません。返された関数が されると、 `tf.Tensor`オブジェクトのリストを返します:入力関数のそれぞれの 引数に対して一つの要素。重要なものすべてを関数パラメータとして渡さなければならないので、 多くのtrainableパラメータに依存している場合、これは扱いにくくなります。 * `tfe.value_and_gradients_function`—` tfe.gradients_function`に似ていますが、 返された関数が呼び出されると、その引数に関する入力関数の導関数のリストに加えて、入力関数からの値を返します。 次の例では、 `tfe.gradients_function`は引数として` square` 関数を取り、その入力に関して `square`の偏微分 導関数を計算する関数を返します。 `3`における` square`の微分を計算するために、 `grad(3.0)`は `6`を返します。 ``` def square(x): return tf.multiply(x, x) grad = tfe.gradients_function(square) square(3.).numpy() grad(3.)[0].numpy() # 平方の二次導関数: gradgrad = tfe.gradients_function(lambda x: grad(x)[0]) gradgrad(3.)[0].numpy() # 3次導関数はNoneになる: gradgradgrad = tfe.gradients_function(lambda x: gradgrad(x)[0]) gradgradgrad(3.) # フロー制御: def abs(x): return x if x > 0. else -x grad = tfe.gradients_function(abs) grad(3.)[0].numpy() grad(-3.)[0].numpy() ``` ### カスタム勾配 カスタム勾配は、Eager ExecutionとGraph Executionの両方の環境で、勾配を上書きする簡単な方法です。 フォワード関数では、 入力、出力、または中間結果に関する勾配を定義します。たとえば、逆方向パスにおいて勾配のノルムを切り取る簡単な方法は次のとおりです: ``` @tf.custom_gradient def clip_gradient_by_norm(x, norm): y = tf.identity(x) def grad_fn(dresult): return [tf.clip_by_norm(dresult, norm), None] return y, grad_fn ``` カスタム勾配は、一連の演算に対して数値的に安定した勾配を提供するために共通的に使用されます。 ``` def log1pexp(x): return tf.log(1 + tf.exp(x)) grad_log1pexp = tfe.gradients_function(log1pexp) # 勾配計算はx = 0のときにはうまくいきます。 grad_log1pexp(0.)[0].numpy() # しかし、x = 100のときは数値的不安定により失敗します。 grad_log1pexp(100.)[0].numpy() ``` ここで、 `log1pexp`関数はカスタム勾配を用いて解析的に単純化することができます。 以下の実装は、フォワードパスの間に計算された `tf.exp(x)`の値を 再利用します—冗長な計算を排除することでより効率的になります: ``` @tf.custom_gradient def log1pexp(x): e = tf.exp(x) def grad(dy): return dy * (1 - 1 / (1 + e)) return tf.log(1 + e), grad grad_log1pexp = tfe.gradients_function(log1pexp) # 上と同様に、勾配計算はx = 0のときにはうまくいきます。 grad_log1pexp(0.)[0].numpy() # また、勾配計算はx = 100でも機能します。 grad_log1pexp(100.)[0].numpy() ``` ## パフォーマンス Eager Executionの間、計算は自動的にGPUにオフロードされます。計算を実行するデバイスを指定したい場合は、 `tf.device( '/ gpu:0')`ブロック(もしくはCPUを指定するブロック)で囲むことで指定できます: ``` import time def measure(x, steps): # TensorFlowはGPUを初めて使用するときに初期化するため、時間計測対象からは除外する。 tf.matmul(x, x) start = time.time() for i in range(steps): x = tf.matmul(x, x) # tf.matmulは、行列乗算が完了する前に戻ることができます。 # (たとえば、CUDAストリームにオペレーションをエンキューした後に戻すことができます)。 # 以下のx.numpy()呼び出しは、すべてのキューに入れられたオペレーションが完了したことを確認します。 # (そして結果をホストメモリにコピーするため、計算時間は単純なmatmulオペレーションよりも多くのことを含む時間になります。) _ = x.numpy() end = time.time() return end - start shape = (1000, 1000) steps = 200 print("Time to multiply a {} matrix by itself {} times:".format(shape, steps)) # CPU上で実行するとき: with tf.device("/cpu:0"): print("CPU: {} secs".format(measure(tf.random_normal(shape), steps))) # GPU上で実行するとき(GPUが利用できれば): if tfe.num_gpus() > 0: with tf.device("/gpu:0"): print("GPU: {} secs".format(measure(tf.random_normal(shape), steps))) else: print("GPU: not found") ``` `tf.Tensor`オブジェクトはそのオブジェクトに対するオペレーションを実行するために別のデバイスにコピーすることができます: ``` if tf.test.is_gpu_available(): x = tf.random_normal([10, 10]) x_gpu0 = x.gpu() x_cpu = x.cpu() _ = tf.matmul(x_cpu, x_cpu) # CPU上で実行するとき _ = tf.matmul(x_gpu0, 
x_gpu0) # GPU:0上で実行するとき if tfe.num_gpus() > 1: x_gpu1 = x.gpu(1) _ = tf.matmul(x_gpu1, x_gpu1) # GPU:1で実行するとき ``` ### ベンチマーク GPUでの [ResNet50](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/eager/python/examples/resnet50) の学習のような、計算量の多いモデルの場合は、Eager ExecutionのパフォーマンスはGraph Executionのパフォーマンスに匹敵します。 しかし、この2つの環境下のパフォーマンスの違いは計算量の少ないモデルではより大きくなり、小さなたくさんのオペレーションからなるモデルでホットコードパスを最適化するためにやるべきことがあります。 ## Graph Executionの実行 Eager Executionは開発とデバッグをより対話的にしますが、 TensorFlowのGraph Executionは分散学習、パフォーマンスの最適化、そしてプロダクション環境へのデプロイの観点で利点があります。 しかし、Graph Executionのコードの記述方法、標準的なのPythonコードの書き方と異なり、デバッグがより難しく感じるかもしれません。 Graph Execution形式のモデルの構築と学習のために、Pythonプログラムは最初に計算グラフを構築し、 それからC++ベースのランタイムで実行するために`Session.run`を呼び出し、グラフを渡します。この機能の特徴は以下のとおりです: * 静的なautodiffによる自動微分 * プラットフォームに依存しないサーバーへの簡単なデプロイ * グラフベースの最適化(共通的な部分式の削除、定数の畳み込みなど) * コンパイルとカーネルフュージョン * 自動分散とレプリケーション(分散システムへのノード配置) Eager Executionのコードは、Graph Executionのコードよりもデプロイが難しいです:モデルから 計算グラフを生成するか、またはサーバ上で直接Pythonランタイムからコードを実行する必要があります。 ### 互換性のあるコードの記述 Eager Execution環境で記述されたコードは、Eager Executionが有効になっていない新しいPythonセッションでおなじコードを実行するだけで おなじコードのままGraph Executionで実行することができます。 ほとんどのTensorFlowオペレーションはEager Executionで動作しますが、注意すべき点がいくつかあります: * 入力処理にはキューの代わりに `tf.data`を使います。この方法はより高速で簡単です。 * `tf.keras.layers`や`tf.keras.Model`のような、オブジェクト指向のレイヤーAPIを使用します—これらのAPIは変数のための明示的なストレージを持っているためです。 * ほとんどのモデルコードは、Eager ExecutionとGraph Executionにおなじように機能しますが、例外があります。 (たとえば、Pythonによる制御フローで入力に基づいて演算を変更する動的モデルなど) * 一度`tf.enable_eager_execution`によってEager Executionが有効化されると、それを無効化することはできません。 Graph Executionに戻すには、新しいPythonセッションを開始する必要があります。 以上が、Eager Execution *と* Graph Executionの両方のためのコードを書くためのベストプラクティスです。これによって、 Eager Executionによる対話的な実験とデバッガビリティを享受することができ、かつGraph Executionによる分散パフォーマンスの恩恵を受けることができます。 Eager Executionを用いてコードを記述、デバッグ、実験を繰り返したのちにプロダクションへのデプロイのためにモデルパスをimportします。 モデル変数を保存および復元するには `tf.train.Checkpoint`を使います。これはEager ExecutionとGraph Executionの両環境の互換性を担保します。 以下にEager Executionのサンプル集があります: [tensorflow/contrib/eager/python/examples](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/eager/python/examples) ### Graph Execution環境でEager Executionを使う `tfe.py_func`を使ってTensorFlowGraph Execution環境でEager Executionを選択的に可能にすることができます。 この機能は、 `tf.enable_eager_execution()`が呼ばれていないときに使うことができます。 ``` def my_py_func(x): x = tf.matmul(x, x) # tfオペレーションを使用することができる print(x) # しかしEager Executionで実行される! return x with tf.Session() as sess: x = tf.placeholder(dtype=tf.float32) # Graph Execution環境でEager Executionを呼び出す pf = tfe.py_func(my_py_func, [x], tf.float32) sess.run(pf, feed_dict={x: [[2.0]]}) # [[4.0]] ```
github_jupyter
<a href="https://colab.research.google.com/github/krmiddlebrook/intro_to_deep_learning/blob/master/machine_learning/lesson%202%20-%20logistic%20regression/challenges/logistic-regression-pokemon.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Logistic regression with Pokemon data The goal in this challenge is to build a logistic regression classifier to distinguish the Type 1 Grass Pokemon from other Pokemon using features about each one. Use Tensorflow to build, train, and evaluate the model. Challenges: 1. Load and prepare the Pokemon dataset. 2. Build the model. 3. Train the model. 4. Evaluate the model. 5. Draw conclusions. ``` # import the libraries we need import pandas as pd import numpy as np import matplotlib.pyplot as plt # TensorFlow and tf.keras import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers ``` # 1. Load and prepare the data Load the dataset into a pandas dataframe, and prepare it for the model. Hints: - Define the features ($\mathbf{x}$) and labels ($y$). You will probably want to use the Pandas `map` function to convert Pokemon with the Type 1 feature to the proper numerical representation, think "Grass" or not. - split the dataset into training and test sets - separate the features and labels in training set and test set ``` data_url = 'https://raw.githubusercontent.com/krmiddlebrook/intro_to_deep_learning/master/datasets/pokemon.csv' # your code here ``` # 2. Build your model Build a model to model the relationship between the features $x$ (multiple features) and labels $y$. Hints: - use the `Sequential` class to define a container for the layers of your model. - use the `layers.Dense` class with the sigmoid activation to define your logistic model layer - define your loss function with "binary_crossentropy" - configure the optimization algorithm with stochastic gradient descent - track the accuracy metric - glue the model, loss function, optimizer, and metrics together ``` # your code here model = keras.Sequential([ ]) ``` # 3. Train your model Now that you have a model, it's time to train it. Train your model for 100 epochs (i.e., iterations), and record the training and validation metrics in the history object. ``` # your code here ``` Visualize the mean squared error metric over the training process. Hint: create a line chart with the epoch (x) and the accuracy (y) variables. ``` # your code here ``` # 4. Evaluate the model Now that the model is trained, it's time to evaluate it using the test dataset, which you did not use when training the model. This gives you a sense of how well the model predicts unseen data, which is the case when you use it in the real world. Make sure to evaluate the model and visualize it's predictions against the true values. Hints: - use the `evaluate` method to test the model - use the `predict` method to make predictions given test features. - visualize the predictions against the real labels using matplotlib's pyplot API methods like `scatter` and `plot`. ``` # your code here ``` # 5. Draw conclusions Write up your conclusions about the model. Report the goal, the model design, and the results. Make sure to contextualize the model results as best you can.
github_jupyter
``` #necessary imports import numpy as np import scipy from scipy.special import gamma, factorial import scipy.special as sc import mpmath as mp import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D #for 3D surface plots import math from cmath import phase from scipy.ndimage.filters import gaussian_filter1d plt.rc('xtick',labelsize=20) plt.rc('ytick',labelsize=20) """ Reproducing plots from the following paper on point mass and SIS lens models R. Takahashi and T. Nakamura, “Wave effects in the gravitational lensing of gravitational waves from chirping binaries,” The Astrophysical Journal, vol. 595, pp. 1039–1051, 2003. """ #Point mass gravitational lens model #magnitude of amplification factor plot #legends on source position y=[0.1,0.25,0.5,1.0] plt.figure(figsize=(10,8)) #computing magnification factor magnitude for varying values of dimensionless frequencies for j in range(0,4): n=10000 w=np.linspace(0.01,27,n) func1=np.exp((np.pi*w)/4) z=1-((1j/2)*w) func2=abs(scipy.special.gamma(z)) a=(1j/2)*w b=np.ones(n) c=(1j/2)*w*y[j]*y[j] func3=np.zeros(n) for i in range(0,n): func3[i]=abs(mp.hyp1f1(a[i],b[i],c[i])) F=func1*func2*func3 plt.loglog(w, F) #plot plt.grid(True, which="both", ls="-") plt.xlabel('w=8πMf (dimensionless)', fontsize=20) plt.xlim(0.01,27) plt.ylim(0.1,10) plt.ylabel('|F| (dimensionless)', fontsize=20) plt.legend(['y = 0.1', 'y = 0.25', 'y = 0.5', 'y = 1.0'], loc='upper left', fontsize=20) plt.show() #legends on w w=[20,5,1] plt.figure(figsize=(10,8)) #computing magnification factor magnitude for varying values of y for j in range(0,3): n=10000 y=np.linspace(0.01,1.5,n) func1=np.exp((np.pi*w[j])/4) z=1-((1j/2)*w[j]) func2=abs(scipy.special.gamma(z)) for i in range(0,n): a[i]=(1j/2)*w[j] b=np.ones(n) c=(1j/2)*w[j]*y*y func3=np.zeros(n) for i in range(0,n): func3[i]=abs(mp.hyp1f1(a[i],b[i],c[i])) F=func1*func2*func3 plt.loglog(y, F) #plot plt.grid(True, which="both", ls="-") plt.xlabel('y (dimensionless)', fontsize=20) plt.xlim(np.amin(y),np.amax(y)) plt.ylim(0.1,20) plt.ylabel('|F| (dimensionless)', fontsize=20) plt.legend(['w = 20', 'w = 5', 'w = 1'], loc='upper right', fontsize=20) plt.show() #phase of amplification factor plot y=[0.1,0.25,0.5,1.0] plt.figure(figsize=(10,8)) xm=np.zeros(4) for j in range(0,4): n=10000 w=np.linspace(0.01,27,n) func1=np.exp((np.pi*w)/4) z=1-((1j/2)*w) func2=(scipy.special.gamma(z)) a=(1j/2)*w b=np.ones(n) c=(1j/2)*w*y[j]*y[j] func3=np.zeros(n,dtype='complex64') for i in range(0,n): func3[i]=(mp.hyp1f1(a[i],b[i],c[i])) xm=0.5*(y[j]+np.sqrt(y[j]*y[j]+4.0)) phim=0.5*((xm-y[j])**2)-np.log(xm) func4=np.zeros(n,dtype='complex64') for i in range(0,n): func4[i]=mp.exp(0.5j*w[i]*(np.log(0.5*w[i])-2.0*phim)) F=abs(func1*func2*func3) Phi=-1j*np.log((func1*func2*func3*func4)/abs((func1*func2*func3*func4))) plt.grid(True, which="both", ls="-") plt.plot(w, Phi) plt.xlabel('w=8πMf (dimensionless)', fontsize=20) plt.xscale('log') plt.xlim(np.amin(w),np.amax(w)) plt.ylabel('\u03B8(F) (in radians)', fontsize=20) plt.legend(['y = 0.1', 'y = 0.25', 'y = 0.5', 'y = 1.0'], loc='upper left', fontsize=20) plt.show() w=[20,5,1] plt.figure(figsize=(10,8)) for j in range(0,3): n=10000 y=np.linspace(0.01,1.5,n) func1=np.exp((np.pi*w[j])/4) z=1-((1j/2)*w[j]) func2=(scipy.special.gamma(z)) for i in range(0,n): a[i]=(1j/2)*w[j] b=np.ones(n) c=(1j/2)*w[j]*y*y func3=np.zeros(n,dtype='complex64') for i in range(0,n): func3[i]=(mp.hyp1f1(a[i],b[i],c[i])) xm=0.5*(y+np.sqrt(y*y+4.0)) phim=0.5*((xm-y)**2)-np.log(xm) func4=np.zeros(n,dtype='complex64') for i in 
range(0,n): func4[i]=mp.exp(0.5j*w[j]*(np.log(0.5*w[j])-2.0*phim[i])) F=abs(func1*func2*func3) Phi=-1j*np.log((func1*func2*func3*func4)/abs((func1*func2*func3*func4))) plt.grid(True, which="both", ls="-") plt.plot(y, Phi) plt.xlabel('y (dimensionless)',fontsize=20) plt.xscale('log') plt.ylabel('\u03B8(F) (in radians)',fontsize=20) plt.xlim(np.amin(y),np.amax(y)) plt.legend(['w = 20', 'w = 5', 'w = 1'], loc='upper left',fontsize=20) plt.show() import numpy as np import matplotlib.pyplot as plt from mpmath import gamma, hyp1f1, factorial, exp, pi,log from cmath import phase plt.rc('xtick',labelsize=20) plt.rc('ytick',labelsize=20) #singular isothermal sphere (SIS) gravitational lens model #defining the summation in F function def integrand(n,w,y): return gamma(1+n/2.0)*hyp1f1(1+n/2.0,1.0,-0.5j*w*y*y)*((2*w*exp(1j*3*pi/2))**(n/2))/factorial(n) #computing phase and magnitude of amplification factor for 4 values of y and varying values of w N = 1000 wvec = np.linspace(0.01,27,N) Fvec = np.zeros(N) Ph1 = np.zeros(N, dtype='complex64') Ph2 = np.zeros(N, dtype='complex64') Ph3 = np.zeros(N, dtype='complex64') Ph4 = np.zeros(N, dtype='complex64') y=0.1 for i,w in enumerate(wvec): delta = 1.0 F = 0.0 n = 0 while delta>1e-6: dF = integrand(n,w,y) F += dF delta = np.abs(dF) n += 1 Fvec[i] = abs(exp(0.5j*w*y*y)*F) Ph1[i]=-1j*log(exp(0.5j*w*((y*y)+(2*y)+1))*F/abs((exp(0.5j*w*((y*y)+(2*y)+1))*F))) plt.figure(figsize=(10,8)) plt.loglog(wvec,Fvec) y=0.25 for i,w in enumerate(wvec): delta = 1.0 F = 0.0 n = 0 while delta>1e-6: dF = integrand(n,w,y) F += dF delta = np.abs(dF) n += 1 Fvec[i] = abs(exp(0.5j*w*y*y)*F) Ph2[i]=-1j*log((exp(0.5j*w*((y*y)+(2*y)+1))*F)/abs((exp(0.5j*w*((y*y)+(2*y)+1))*F))) plt.loglog(wvec,Fvec) y=0.5 for i,w in enumerate(wvec): delta = 1.0 F = 0.0 n = 0 while delta>1e-6: dF = integrand(n,w,y) F += dF delta = np.abs(dF) n += 1 Fvec[i] = abs(exp(0.5j*w*y*y)*F) Ph3[i]=-1j*log(exp(0.5j*w*((y*y)+(2*y)+1))*F/abs((exp(0.5j*w*((y*y)+(2*y)+1))*F))) plt.loglog(wvec,Fvec) y=1.0 for i,w in enumerate(wvec): delta = 1.0 F = 0.0 n = 0 while delta>1e-6: dF = integrand(n,w,y) F += dF delta = np.abs(dF) n += 1 Fvec[i] = abs(exp(0.5j*w*y*y)*F) Ph4[i]=-1j*log(exp(0.5j*w*((y*y)+(2*y)+1))*F/abs((exp(0.5j*w*((y*y)+(2*y)+1))*F))) #magnitude plots plt.loglog(wvec,Fvec) plt.grid(True, which="both", ls="-") plt.xlim(0.01,27) plt.ylim(0.1,10) plt.xlabel('w=8πMf (dimensionless)', fontsize=20) plt.ylabel('|F| (dimensionless)', fontsize=20) plt.legend(['y = 0.1', 'y = 0.25', 'y = 0.5', 'y = 1.0'], loc='upper left', fontsize=20) plt.show() #phase plots plt.figure(figsize=(10,8)) plt.plot(wvec,Ph1) plt.plot(wvec,Ph2) plt.plot(wvec,Ph3) plt.plot(wvec,Ph4) plt.xscale('log') plt.xlim(np.amin(wvec),np.amax(wvec)) plt.xlabel('w=8πMf (dimensionless)', fontsize=20) plt.ylabel('\u03B8(F) (in radians)', fontsize=20) plt.grid(True, which="both", ls="-") plt.legend(['y = 0.1', 'y = 0.25', 'y = 0.5', 'y = 1.0'], loc='upper left', fontsize=20) plt.show() #phase and magnitude values computation for 4 values of w and varying values of y a=[20,5,1] yvec=np.linspace(0.01,1.5,N) plt.figure(figsize=(10,8)) for j in range(0,3): w=a[j] for i,y in enumerate(yvec): delta = 1.0 F = 0.0 n = 0 while delta>1e-3: dF = integrand(n,w,y) F += dF delta = np.abs(dF) n += 1 Fvec[i] = abs(exp(0.5j*w*y*y)*F) if(j==0): Ph1[i]=-1j*log(exp(0.5j*w*((y*y)+(2*y)+1))*F/abs((exp(0.5j*w*((y*y)+(2*y)+1))*F))) elif(j==1): Ph2[i]=-1j*log(exp(0.5j*w*((y*y)+(2*y)+1))*F/abs((exp(0.5j*w*((y*y)+(2*y)+1))*F))) elif(j==2): 
Ph3[i]=-1j*log(exp(0.5j*w*((y*y)+(2*y)+1))*F/abs((exp(0.5j*w*((y*y)+(2*y)+1))*F))) plt.loglog(yvec,Fvec) #magnitude plot plt.grid(True, which="both", ls="-") plt.xlabel('y (dimensionless)', fontsize=20) plt.xlim(np.amin(yvec),np.amax(yvec)) plt.ylim(0.1,20) plt.ylabel('|F| (dimensionless)', fontsize=20) plt.legend(['w = 20', 'w = 5', 'w = 1'], loc='upper right', fontsize=20) plt.show() #phase plot plt.figure(figsize=(10,8)) plt.plot(yvec,Ph1) plt.plot(yvec,Ph2) plt.plot(yvec,Ph3) plt.xscale('log') plt.xlim(np.amin(yvec),np.amax(yvec)) plt.xlabel('y (dimensionless)', fontsize=20) plt.ylabel('\u03B8(F) (in radians)', fontsize=20) plt.grid(True, which="both", ls="-") plt.legend(['w = 20', 'w = 5', 'w = 1'], loc='upper left', fontsize=20) plt.show() ```
github_jupyter
## IBM Quantum Challenge Fall 2021 # Challenge 2: OLED 분자들의 밴드갭 계산 <div id='problem'></div> <div class="alert alert-block alert-info"> 최고의 경험을 위해 오른쪽 상단의 계정 메뉴에서 **light** 워크스페이스 테마로 전환하는 것을 추천합니다. ## 소개 유기 발광 다이오드(Organic Light Emitting Diode) 또는 OLED는 전류를 인가하면 빛을 내는, 얇고 유연한 TV 및 휴대폰 디스플레이 제조의 기초 소자로 최근 몇 년 동안 점점 인기를 얻고 있습니다. 최근 연구([**Gao et al., 2021**](https://www.nature.com/articles/s41524-021-00540-6))에서는 페닐설포닐-카바졸(PSPCz) 분자에서 고에너지 상태의 전자 전이를 관측했으며, 이는 OLED 기술에 유용한 열 활성화 지연 형광(TADF) 이미터에 활용 될 수있습니다. OLED를 만들기 위해 현재 사용되는 양자 효율이 25%로 제한된 기존의 형광 포어와 비교하여 TADF 방출기는 100% 내부 양자 효율(IQP)-흡수된 광자를 방출하는 회로 또는 시스템에서 전하 캐리어의 비율-로 작동하는 OLED를 생산할 가능성을 지니고 있습니다. 효율성의 큰 증가는 제조업체들이 휴대폰 처럼 낮은 소비 전력을 요구하는 기기에 사용할 OLED를 생산할 수 있다는 것을 의미하며, 이는 결과적으로 가정, 사무실, 박물관 그리고 그보다 더 많은 넓은 면적의 공간들이 값싸고 에너지 효율이 높은 조명원을 사용하게 되는 미래의 개발로 이어질 수 있음을 의미합니다! <center><img src="resources/JSR_img6_1920w.jpg" width="600"></center> ### Why quantum? 양자 컴퓨터는 고전적인 컴퓨터보다 양자 장치에서 양자 역학 시스템을 모델링하는 것이 지닌 자연스러움으로 인해 복잡한 분자 및 재료의 전자 구조 및 동적 특성을 연구하는 데 귀중한 도구가 될 것으로 기대되고 있습니다. IBM Quantum과 파트너의 최근 공동 연구 프로젝트는 효율적인 OLED를 위한 TADF의 들뜬 상태 계산의 정확도를 향상시키는 방법을 성공적으로 개발하여 상용 재료의 들뜬 상태 계산에 양자 컴퓨터를 적용한 세계 최초의 연구 사례가 되었습니다(위에 링크된 논문 참조). 이러한 배경 정보와 함께, 효율적인 유기발광다이오드(OLED) 소자의 제작에 잠재적으로 사용될 수 있는, 산업적 화학 화합물의 "들뜬 상태" 또는 높은 에너지 상태에 대한 양자 계산을 설명하고자 합니다. ## 도전 과제 <div class="alert alert-block alert-success"> <b>목표</b> 이 도전의 목표는 양자 알고리즘을 사용하여 TADF 물질의 들뜬 상태 에너지를 신뢰성 있게 계산하는 것입니다. 그러기 위해서 이 노트북을 통해 고전적 근사 단계와 보다 정확한 양자 계산 단계 간의 작업 부하 분할을 허용하는 최첨단 하이브리드 고전-양자 방식의 화학 모델링을 도입합니다. 1. **도전 2a & 2b**: 원자 궤도(AO), 분자 궤도 (MO), 능동 공간 변환(Active Space Transformation)을 사용하여 궤도의 수를 줄이는 방법을 이해합니다. 2. **도전 2c & 2d**: NumPy 및 VQE(Variational Quantum Eigensolver)를 사용하여 PSPCz 분자의 바닥 상태 에너지를 계산합니다. 3. **도전 2e**: 양자 운동 방정식(QEOM) 알고리즘을 사용하여 PSPCz 분자의 들뜬 상태 에너지를 계산합니다. 4. **도전 2f**: Qiskit Runtime을 사용하여 클라우드(시뮬레이터 또는 실제 양자 시스템)에서 VQE를 실행합니다. </div> <div class="alert alert-block alert-info"> 시작하기에 앞서,[**Qiskit Nature Demo Session with Max Rossmannek**](https://youtu.be/UtMVoGXlz04?t=38)을 시청하고 데모에 사용된 [**demo notebook**](https://github.com/qiskit-community/qiskit-application-modules-demo-sessions/tree/main/qiskit-nature)을 통해 전자 구조 계산을 미리 배워보길 추천합니다. </div> ### 1. 드라이버(Driver) Qiskit과 고전적인 화학 코드 사이의 인터페이스를 드라이버라고 합니다. `PSI4Driver`, `PyQuanteDriver`, `PySCFDriver` 등이 있습니다. 아래의 셀에서 드라이버(주어진 기저 집합과 분자의 구조 정보에 대한 하트리-폭 계산)를 실행하여, 양자 알고리즘을 적용하기 위해 알아야 하는 대상 분자에 대한 모든 필요한 정보를 얻습니다. 
``` from qiskit_nature.drivers import Molecule from qiskit_nature.drivers.second_quantization import ElectronicStructureDriverType, ElectronicStructureMoleculeDriver # PSPCz molecule geometry = [['C', [ -0.2316640, 1.1348450, 0.6956120]], ['C', [ -0.8886300, 0.3253780, -0.2344140]], ['C', [ -0.1842470, -0.1935670, -1.3239330]], ['C', [ 1.1662930, 0.0801450, -1.4737160]], ['C', [ 1.8089230, 0.8832220, -0.5383540]], ['C', [ 1.1155860, 1.4218050, 0.5392780]], ['S', [ 3.5450920, 1.2449890, -0.7349240]], ['O', [ 3.8606900, 1.0881590, -2.1541690]], ['C', [ 4.3889120, -0.0620730, 0.1436780]], ['O', [ 3.8088290, 2.4916780, -0.0174650]], ['C', [ 4.6830900, 0.1064460, 1.4918230]], ['C', [ 5.3364470, -0.9144080, 2.1705280]], ['C', [ 5.6895490, -2.0818670, 1.5007820]], ['C', [ 5.4000540, -2.2323130, 0.1481350]], ['C', [ 4.7467230, -1.2180160, -0.5404770]], ['N', [ -2.2589180, 0.0399120, -0.0793330]], ['C', [ -2.8394600, -1.2343990, -0.1494160]], ['C', [ -4.2635450, -1.0769890, 0.0660760]], ['C', [ -4.5212550, 0.2638010, 0.2662190]], ['C', [ -3.2669630, 0.9823890, 0.1722720]], ['C', [ -2.2678900, -2.4598950, -0.3287380]], ['C', [ -3.1299420, -3.6058560, -0.3236210]], ['C', [ -4.5179520, -3.4797390, -0.1395160]], ['C', [ -5.1056310, -2.2512990, 0.0536940]], ['C', [ -5.7352450, 1.0074800, 0.5140960]], ['C', [ -5.6563790, 2.3761270, 0.6274610]], ['C', [ -4.4287740, 3.0501460, 0.5083650]], ['C', [ -3.2040560, 2.3409470, 0.2746950]], ['H', [ -0.7813570, 1.5286610, 1.5426490]], ['H', [ -0.7079140, -0.7911480, -2.0611600]], ['H', [ 1.7161320, -0.2933710, -2.3302930]], ['H', [ 1.6308220, 2.0660550, 1.2427990]], ['H', [ 4.4214900, 1.0345500, 1.9875450]], ['H', [ 5.5773000, -0.7951290, 3.2218590]], ['H', [ 6.2017810, -2.8762260, 2.0345740]], ['H', [ 5.6906680, -3.1381740, -0.3739110]], ['H', [ 4.5337010, -1.3031330, -1.6001680]], ['H', [ -1.1998460, -2.5827750, -0.4596910]], ['H', [ -2.6937370, -4.5881470, -0.4657540]], ['H', [ -5.1332290, -4.3740010, -0.1501080]], ['H', [ -6.1752900, -2.1516170, 0.1987120]], ['H', [ -6.6812260, 0.4853900, 0.6017680]], ['H', [ -6.5574610, 2.9529350, 0.8109620]], ['H', [ -4.3980410, 4.1305040, 0.5929440]], ['H', [ -2.2726630, 2.8838620, 0.1712760]]] molecule = Molecule(geometry=geometry, charge=0, multiplicity=1) driver = ElectronicStructureMoleculeDriver(molecule=molecule, basis='631g*', driver_type=ElectronicStructureDriverType.PYSCF) ``` <div class="alert alert-block alert-success"> <b> 도전 2a</b> 질문: PSPCz 분자의 다음의 값들을 찾아봅시다. 1. C, H, N, O, S 원자들은 각각 몇개입니까? 1. 원자들의 총 몇 개입니까? 1. 원자 궤도는 전부 몇개입니까 (AO) ? 1. 분자 궤도는 전부 몇개입니까 (MO) ? </div> <div class="alert alert-block alert-danger"> **원자의 궤도를 어떻게 셀 수 있나요?** 궤도의 숫자는 베이시스와 관련이 있습니다. 아래의 숫자는 이 도전에 사용될 `631g*` 베이시스의 숫자들입니다. - C: 1s, 2s2p, 3s3p3d = 1+4+9 = 14 - H: 1s, 2s = 1+1 = 2 - N: 1s, 2s2p, 3s3p3d = 1+4+9 = 14 - O: 1s, 2s2p, 3s3p3d = 1+4+9 = 14 - S: 1s, 2s2p, 3s3p3d, 4s4p = 1+4+9+4 = 18 ``` num_ao = { 'C': 14, 'H': 2, 'N': 14, 'O': 14, 'S': 18, } ############################## # Provide your code here num_C_atom = num_H_atom = num_N_atom = num_O_atom = num_S_atom = num_atoms_total = num_AO_total = num_MO_total = ############################## answer_ex2a ={ 'C': num_C_atom, 'H': num_H_atom, 'N': num_N_atom, 'O': num_O_atom, 'S': num_S_atom, 'atoms': num_atoms_total, 'AOs': num_AO_total, 'MOs': num_MO_total } print(answer_ex2a) # Check your answer and submit using the following code from qc_grader import grade_ex2a grade_ex2a(answer_ex2a) ``` 위의 연습에서 발견했듯이 PSPCz는 많은 원자와 많은 원자 궤도로 구성된 거대한 분자입니다. 거대한 분자는 현재의 양자 시스템으로 직접 계산하기 어렵습니다. 
As you found in the exercise above, PSPCz is a large molecule, consisting of many atoms and many atomic orbitals. Large molecules are difficult to compute directly on current quantum systems. However, since we are only interested in the band gap in this challenge, it is enough to compute the energies of the HOMO (Highest Occupied Molecular Orbital) and the LUMO (Lowest Unoccupied Molecular Orbital). Here we use the active space transformation technique to reduce the number of molecular orbitals to two (the HOMO and the LUMO):

$$E_g = E_{LUMO} - E_{HOMO}$$

<center><img src="resources/Molecule_HOMO-LUMO_diagram.svg" width="600"></center>

Each circle in this diagram represents an electron in an orbital; when an electron in the HOMO absorbs energy or light of a sufficiently high frequency, it jumps up into the LUMO.

For the PSPCz molecule, we restrict the excited states to the first singlet and triplet states. In a singlet state, all electrons in the system are spin-paired, so they can take only one possible orientation in space. A singlet or triplet excited state can be formed by exciting one of the two electrons to a higher energy level. The excited electron retains the same spin orientation in a singlet excited state, whereas in a triplet excited state the excited electron has the same spin orientation as the ground-state electron.

<center><img src="resources/spin.jpg" width="300"><figcaption>Spin in the ground and excited states</figcaption></center>

In a triplet state, one set of electron spins is unpaired, meaning the spin can take three possible orientations in space. The LUMOs (a-c) and HOMOs (d-f) of the triplet-state optimized structures of PSPCz (a, d) and its variants 2F-PSPCz (b, e) and 4F-PSPCz (c, f) look like the following figures.

<center><img src="resources/oled_paper_fig2.jpg" width="600"></center>

<center><img src="resources/oled_paper_fig1.jpg" width="600"></center>

Using the active space transformation method, we can compute these energies with a small number of qubits by restricting the calculation of the singlet and triplet states to the smallest possible active space, excluding the non-core electronic states while retaining a high-level description of the system.

```
from qiskit_nature.drivers.second_quantization import HDF5Driver

driver_reduced = HDF5Driver("resources/PSPCz_reduced.hdf5")
properties = driver_reduced.run()

from qiskit_nature.properties.second_quantization.electronic import ElectronicEnergy

electronic_energy = properties.get_property(ElectronicEnergy)
print(electronic_energy)
```

The `(AO) 1-Body Terms` contain a (430 x 430) matrix representing the 430 atomic orbitals of the target molecule, corresponding to its 430 molecular orbitals in total. After the `ActiveSpaceTransformation` (pre-calculated here), the number of molecular orbitals in the `(MO) 1-Body Terms` is reduced to a (2 x 2) matrix.

<div class="alert alert-block alert-success">

<b>Challenge 2b</b>

Question: Use the Property framework to answer the questions below.

1. What is the number of electrons in the system after the active space transformation?
1. What is the number of molecular orbitals?
1. What is the number of spin-orbitals?
1. How many qubits do you need to simulate this molecule with the Jordan-Wigner mapping?

</div>

```
from qiskit_nature.properties.second_quantization.electronic import ParticleNumber

##############################
# Provide your code here

particle_number =
num_electron =
num_MO =
num_SO =
num_qubits =

##############################

answer_ex2b = {
    'electrons': num_electron,
    'MOs': num_MO,
    'SOs': num_SO,
    'qubits': num_qubits
}

print(answer_ex2b)

# Check your answer and submit using the following code
from qc_grader import grade_ex2b
grade_ex2b(answer_ex2b)
```

### 2. Electronic structure problem

Next, before mapping to qubits, we create the list of fermionic operators with an ElectronicStructureProblem. This is the first step towards computing the ground state of your molecular system. More details about ground-state calculations can be found in [**this tutorial**](https://qiskit.org/documentation/nature/tutorials/03_ground_state_solvers.html).

<center><img src="resources/H2_gs.png" width="300"></center>

```
from qiskit_nature.problems.second_quantization import ElectronicStructureProblem

##############################
# Provide your code here

es_problem =

##############################

second_q_op = es_problem.second_q_ops()
print(second_q_op[0])
```

### 3. QubitConverter

The qubit converter lets you choose the type of mapping used for the simulation.
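As an illustration of how the choice of mapper affects the number of qubits (a sketch, not the graded answer), the snippet below compares the Jordan-Wigner mapping with a parity mapping that uses the two-qubit reduction mentioned later in this notebook. It assumes the `properties` and `second_q_op` objects defined in the cells above; exact attribute names can differ slightly across Qiskit Nature versions.

```
from qiskit_nature.converters.second_quantization import QubitConverter
from qiskit_nature.mappers.second_quantization import JordanWignerMapper, ParityMapper
from qiskit_nature.properties.second_quantization.electronic import ParticleNumber

# Jordan-Wigner: one qubit per spin-orbital
jw_converter = QubitConverter(JordanWignerMapper())
jw_op = jw_converter.convert(second_q_op[0])

# Parity mapping with two-qubit reduction: saves two qubits
particle_number = properties.get_property(ParticleNumber)
parity_converter = QubitConverter(ParityMapper(), two_qubit_reduction=True)
parity_op = parity_converter.convert(second_q_op[0], num_particles=particle_number.num_particles)

print(jw_op.num_qubits, parity_op.num_qubits)
```

The graded cell below asks you to pick and construct your own converter.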
```
from qiskit_nature.converters.second_quantization import QubitConverter
from qiskit_nature.mappers.second_quantization import JordanWignerMapper, ParityMapper, BravyiKitaevMapper

##############################
# Provide your code here

qubit_converter =

##############################

qubit_op = qubit_converter.convert(second_q_op[0])
print(qubit_op)
```

### 4. Initial state

As explained in the theory section, a good initial state to use in chemistry calculations is the Hartree-Fock (HF) state. It can be initialized as follows:

```
from qiskit_nature.circuit.library import HartreeFock

##############################
# Provide your code here

init_state =

##############################

init_state.draw()
```

### 5. Ansatz

Choosing a good quantum circuit is very important for computing the ground state. Below are examples of how to build your own circuit using the Qiskit circuit library.

```
from qiskit.circuit.library import EfficientSU2, TwoLocal, NLocal, PauliTwoDesign
from qiskit_nature.circuit.library import UCCSD, PUCCD, SUCCD

##############################
# Provide your code here

ansatz =

##############################

ansatz.decompose().draw()
```

## Ground state energy calculation

### Calculation using NumPy

For learning purposes, we can solve this problem exactly by diagonalizing the matrix and use the result as the target value that VQE should reach. The dimension of this matrix scales exponentially with the number of molecular orbitals, so for large target molecules the calculation can take a while. For very large molecular systems, you may even run out of memory to store the wave function.

<center><img src="resources/vqe.png" width="600"></center>

```
from qiskit.algorithms import NumPyMinimumEigensolver
from qiskit_nature.algorithms import GroundStateEigensolver

##############################
# Provide your code here

numpy_solver =
numpy_ground_state_solver =
numpy_results =

##############################

exact_energy = numpy_results.computed_energies[0]
print(f"Exact electronic energy: {exact_energy:.6f} Hartree\n")
print(numpy_results)

# Check your answer and submit using the following code
from qc_grader import grade_ex2c
grade_ex2c(numpy_results)
```

### Calculation using VQE

The next step is to compute the ground-state energy with VQE. You are now about halfway through solving your electronic structure problem!

```
from qiskit.providers.aer import StatevectorSimulator, QasmSimulator
from qiskit.algorithms.optimizers import COBYLA, L_BFGS_B, SPSA, SLSQP

##############################
# Provide your code here

backend =
optimizer =

##############################

from qiskit.algorithms import VQE
from qiskit_nature.algorithms import VQEUCCFactory, GroundStateEigensolver
from jupyterplot import ProgressPlot
import numpy as np

error_threshold = 10 # mHartree

np.random.seed(5)  # fix seed for reproducibility
initial_point = np.random.random(ansatz.num_parameters)

# for live plotting
pp = ProgressPlot(plot_names=['Energy'],
                  line_names=['Runtime VQE', f'Target + {error_threshold}mH', 'Target'])

intermediate_info = {
    'nfev': [],
    'parameters': [],
    'energy': [],
    'stddev': []
}

def callback(nfev, parameters, energy, stddev):
    intermediate_info['nfev'].append(nfev)
    intermediate_info['parameters'].append(parameters)
    intermediate_info['energy'].append(energy)
    intermediate_info['stddev'].append(stddev)
    pp.update([[energy, exact_energy+error_threshold/1000, exact_energy]])

##############################
# Provide your code here

vqe =
vqe_ground_state_solver =
vqe_results =

##############################

print(vqe_results)

error = (vqe_results.computed_energies[0] - exact_energy) * 1000 # mHartree
print(f'Error is: {error:.3f} mHartree')

# Check your answer and submit using the following code
from qc_grader import grade_ex2d
grade_ex2d(vqe_results)
```
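Once the graded cell above has run, one way to inspect how the optimization converged is to plot the energies collected by the callback against the exact reference; this is just a sketch reusing the `intermediate_info` dictionary and `exact_energy` defined above.

```
import matplotlib.pyplot as plt

# Energy per optimizer evaluation, collected by the callback above
plt.plot(intermediate_info['energy'], label='VQE energy')
plt.axhline(exact_energy, color='black', linestyle='--', label='Exact (NumPy) energy')
plt.xlabel('Optimizer evaluations')
plt.ylabel('Energy (Hartree)')
plt.legend()
plt.show()
```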
## Excited state calculation

### Calculation using QEOM

Next, we compute the excited states of your molecular Hamiltonian. The system has already been defined above, so here we use the quantum equation-of-motion (qEOM) algorithm to compute the excited-state energies by solving the pseudo-eigenvalue problem

<center><img src="resources/math-1.svg" width="400"></center>

with

<center><img src="resources/math-2.svg" width="300"></center>

where each matrix element must be measured on the quantum computer with its corresponding ground state. For a deeper understanding, we recommend reading [**this tutorial**](https://qiskit.org/documentation/nature/tutorials/04_excited_states_solvers.html) about excited-state calculations, and the [**corresponding paper by Ollitrault et al., 2019**](https://arxiv.org/abs/1910.12890) for the qEOM theory itself.

```
from qiskit_nature.algorithms import QEOM

##############################
# Provide your code here

qeom_excited_state_solver =
qeom_results =

##############################

print(qeom_results)

# Check your answer and submit using the following code
from qc_grader import grade_ex2e
grade_ex2e(qeom_results)
```

Finally, by taking the difference between the ground-state and excited-state energies obtained above, you get the band gap or energy gap, the minimum amount of energy required by an electron to break free of its ground state and jump into an excited state.

```
bandgap = qeom_results.computed_energies[1] - qeom_results.computed_energies[0]
bandgap # in Hartree
```

## Running VQE on the cloud using Qiskit Runtime

Qiskit Runtime is a new architecture offered by IBM Quantum that streamlines computations requiring many iterations, significantly improving the execution speed of experiments within an improved hybrid quantum/classical workflow. Authorized users can upload quantum programs written with Qiskit for themselves or others to use. A Qiskit quantum program, also called a Qiskit Runtime program, is Python code that takes certain inputs, performs quantum and classical computation, interactively provides intermediate results if desired, and returns the processing results. The same or other authorized users can then invoke these quantum programs by simply passing in the required input parameters.

<center><img src="resources/qiskit-runtime1.gif" width="600"></center>

<center><img src="resources/runtime_arch.png" width="600"></center>

To run VQE with Qiskit Runtime, very little has to change compared to a local VQE run: we only need to replace the VQE class by the VQEProgram class. Both follow the same MinimumEigensolver interface and thus share the compute_minimum_eigenvalue method to execute the algorithm and return the same type of result object; only the signature of the initializer differs slightly.

We start by choosing the provider with access to the Qiskit Runtime service and the backend on which to run the circuits. For more information about Qiskit Runtime, please refer to the [**VQEProgram**](https://qiskit.org/documentation/partners/qiskit_runtime/tutorials/vqe.html#Runtime-VQE:-VQEProgram) and [**Leveraging Qiskit Runtime**](https://qiskit.org/documentation/nature/tutorials/07_leveraging_qiskit_runtime.html) tutorials.

```
from qc_grader.util import get_challenge_provider

provider = get_challenge_provider()
backend = provider.get_backend('ibmq_qasm_simulator')

from qiskit_nature.runtime import VQEProgram

error_threshold = 10 # mHartree

# for live plotting
pp = ProgressPlot(plot_names=['Energy'],
                  line_names=['Runtime VQE', f'Target + {error_threshold}mH', 'Target'])

intermediate_info = {
    'nfev': [],
    'parameters': [],
    'energy': [],
    'stddev': []
}

def callback(nfev, parameters, energy, stddev):
    intermediate_info['nfev'].append(nfev)
    intermediate_info['parameters'].append(parameters)
    intermediate_info['energy'].append(energy)
    intermediate_info['stddev'].append(stddev)
    pp.update([[energy, exact_energy+error_threshold/1000, exact_energy]])

##############################
# Provide your code here

optimizer = {
    'name': 'QN-SPSA',  # leverage the Quantum Natural SPSA
    # 'name': 'SPSA',  # set to ordinary SPSA
    'maxiter': 100,
}

runtime_vqe =

##############################
```

<div class="alert alert-block alert-success">

**How Challenge 2f is graded**

Grading of this exercise is slightly different from the previous ones.

1. First, use `prepare_ex2f` to submit a runtime job containing your `runtime_vqe (VQEProgram)`, `qubit_converter (QubitConverter)` and `es_problem (ElectronicStructureProblem)` to IBM Quantum, where it is computed on a simulator. Depending on the queue, running it may take a few minutes.
Internally, `prepare_ex2f` does the following:
```python
runtime_vqe_groundstate_solver = GroundStateEigensolver(qubit_converter, runtime_vqe)
runtime_vqe_result = runtime_vqe_groundstate_solver.solve(es_problem)
```

2. After the job has completed, check and submit your answer with `grade_ex2f`.

</div>

```
# Submit a runtime job using the following code
from qc_grader import prepare_ex2f
runtime_job = prepare_ex2f(runtime_vqe, qubit_converter, es_problem)

# Check your answer and submit using the following code
from qc_grader import grade_ex2f
grade_ex2f(runtime_job)

print(runtime_job.result().get("eigenvalue"))
```

Congratulations! You have submitted your first Qiskit Runtime program and passed the exercise. But the fun does not stop here! We have reserved a dedicated quantum system for the Quantum Challenge. As an ungraded bonus exercise, try running a VQE runtime job on a real quantum system!

<div class="alert alert-block alert-success">

**Running VQE on a real quantum system (optional)**

We have reserved the dedicated quantum system [`ibm_perth`](https://quantum-computing.ibm.com/services?services=systems&system=ibm_perth) for this challenge. Follow the steps below to submit a runtime job to the real quantum system.

1. Update the backend selection to `ibm_perth` and pass it to `runtime_vqe` again:
```
backend = provider.get_backend('ibm_perth')
runtime_vqe = VQEProgram(...
                         backend=backend, ...)
```
2. Set the `real_device` flag of `prepare_ex2f` to `True`.
3. Run `prepare_ex2f` to submit a runtime job to `ibm_perth`.

</div>

<div class="alert alert-block alert-danger">

Note: Qiskit Runtime speeds up VQE by up to 5 times. However, each runtime job can still take 30 to 60 minutes of quantum processor time. Therefore, **the queue time to complete a job** could be hours or even days, depending on how many participants are submitting jobs.

To provide a pleasant experience for all participants, please try the following settings with the simulator first, and only then submit a job to the real quantum system:

1. Use `ParityMapper` and set `two_qubit_reduction=True` to reduce the required number of qubits to two, so that the VQE program converges to the ground-state energy faster (with fewer iterations).
1. Limit the optimizer option to `maxiter=100` or less. Use the simulator runs to find the optimal low number of iterations.
1. Verify that your runtime program is correct by first passing a VQE that uses the simulator as backend to `grade_ex2f`.
1. Limit your jobs to one per participant, so that more participants can try runtime on the real quantum system.

Don't worry if your job takes too long to run or cannot be executed before the challenge ends. This is an optional exercise; you can pass all the challenge exercises and earn the digital badge without running a job on a real quantum system.

</div>

```
# Please change `real_device` flag to True if you want to send a job to the real quantum system
runtime_job_real_device = prepare_ex2f(runtime_vqe, qubit_converter, es_problem, real_device=False)

print(runtime_job_real_device.result().get("eigenvalue"))
```

## Additional information

**Created by:** Junye Huang, Samanvay Sharma

**Korean translation:** 신소영, 김정원

**Version:** 1.0.0
github_jupyter
```
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.data import WeightedRandomSampler, SubsetRandomSampler
import skorch

import torchvision.datasets as dset
import torchvision.transforms as T
import torchvision.models as models
import collections
import copy
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_validate

import numpy as np
import matplotlib
# matplotlib.use('agg')
import matplotlib.pyplot as plt

from Conv3D_Dataset import ILDDataset

USE_GPU = True
dtype = torch.float32  # we will be using float throughout this tutorial

if USE_GPU and torch.cuda.is_available():
    device = torch.device('cuda')
else:
    device = torch.device('cpu')

# Constant to control how frequently we print train loss
print_every = 100
print('using device:', device)

NUM_TOTAL = 1979
NUM_TRAIN = 1600

# add path as absolute path for root dir
im_size = 128
lung_dataset_train = ILDDataset(csv_file=r'C:/Users/Akrofi/Desktop/CS 231/Project/train_labels.csv',
                                root_dir=r'C:/Users/Akrofi/Desktop/CS 231/Project/train',
                                mask=True, train=True, resize=im_size)
# add path as absolute path for root dir
lung_dataset_test = ILDDataset(csv_file=r'C:/Users/Akrofi/Desktop/CS 231/Project/test_labels.csv',
                               root_dir=r'C:/Users/Akrofi/Desktop/CS 231/Project/test',
                               mask=True, train=False, resize=im_size)

dataset_x = []
dataset_y = []

# Pull the first 20 volumes and their labels into plain Python lists
for i in range(20):
    print(i)
    a = lung_dataset_train[i][0].tolist()
    dataset_x.append(a)
    b = lung_dataset_train[i][1].tolist()
    dataset_y.append(b)

a = np.asarray(dataset_x)
b = np.asarray(dataset_y)
print(len(a))

X_train, X_test, y_train, y_test = train_test_split(np.asarray(dataset_x), np.asarray(dataset_y),
                                                    test_size=0.20, random_state=42,
                                                    stratify=dataset_y)
print(y_test)

class_sample_count = np.array([len(np.where(y_train == t)[0]) for t in np.unique(y_train)])
weight = 1. / class_sample_count
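# The classes in this dataset are imbalanced, so each training sample is weighted
# by the inverse frequency of its class; WeightedRandomSampler then draws roughly
# class-balanced mini-batches during training.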
samples_weight = np.array([weight[t] for t in y_train])
samples_weight = torch.from_numpy(samples_weight)

weighted_sampler = WeightedRandomSampler(samples_weight.type('torch.DoubleTensor'), len(samples_weight))

bs = 16  # batch size; assumed value, it was left undefined in the original notebook

trainDataset = torch.utils.data.TensorDataset(torch.FloatTensor(X_train), torch.LongTensor(y_train.astype(int)))
validDataset = torch.utils.data.TensorDataset(torch.FloatTensor(X_test), torch.LongTensor(y_test.astype(int)))

trainLoader = torch.utils.data.DataLoader(dataset=trainDataset, batch_size=bs, num_workers=1, sampler=weighted_sampler)
testLoader = torch.utils.data.DataLoader(dataset=validDataset, batch_size=bs, shuffle=False, num_workers=1)

# Print the 0/1 composition of each balanced batch as a sanity check
for i, (data, target) in enumerate(trainLoader):
    print("batch index {}, 0/1: {}/{}".format(
        i, len(np.where(target.numpy() == 0)[0]), len(np.where(target.numpy() == 1)[0])))

loader_train = DataLoader(lung_dataset_train, batch_size=16,
                          sampler=SubsetRandomSampler(range(NUM_TRAIN)))

loader_val = DataLoader(lung_dataset_train, batch_size=16,
                        sampler=SubsetRandomSampler(range(NUM_TRAIN, NUM_TOTAL)))

loader_test = DataLoader(lung_dataset_test, batch_size=16)

# show a data sample
sample = lung_dataset_train[810]
plt.imshow(sample[0][3], cmap='gray')
plt.show()
print("label: " + str(sample[1]))

def flatten(x):
    N = x.shape[0]  # read in N, C, H, W
    return x.view(N, -1)  # "flatten" the C * H * W values into a single vector per image

class Flatten(nn.Module):
    def forward(self, x):
        return flatten(x)

def get_model():
    """
    Build the 3D CNN used for classification.
    """
    in_channel = 1
    channel_1 = 32
    channel_2 = 64
    channel_3 = 32
    num_classes = 3

    model = nn.Sequential(
        nn.Conv3d(in_channel, channel_1, kernel_size=5, padding=2, stride=1),
        nn.ReLU(),
        nn.Conv3d(channel_1, channel_2, kernel_size=3, padding=1, stride=1),
        nn.ReLU(),
        nn.BatchNorm3d(num_features=channel_2),
        nn.MaxPool3d(kernel_size=1, stride=1),
        nn.Conv3d(channel_2, channel_3, kernel_size=3, padding=1, stride=1),
        nn.ReLU(),
        Flatten(),
        # 2097152 = 32 channels * depth * 128 * 128, hard-coded for the input volume size used here
        nn.Linear(2097152, num_classes)
    )
    return model

def check_accuracy(loader, model, val=False, train=False):
    if train:
        print('Checking accuracy on training set')
    elif val:
        print('Checking accuracy on validation set')
    else:
        print('Checking accuracy on test set')
    num_correct = 0
    num_samples = 0
    model.eval()  # set model to evaluation mode
    with torch.no_grad():
        for x, y in loader:
            x = x.to(device=device, dtype=dtype)  # move to device, e.g. GPU
            y = y.to(device=device, dtype=torch.long)
            [N, C, H, W] = [*x.size()]
            scores = model(x.view(N, 1, C, H, W))
            _, preds = scores.max(1)
            num_correct += (preds == y).sum()
            num_samples += preds.size(0)
        acc = float(num_correct) / num_samples
        print('Got %d / %d correct (%.2f)' % (num_correct, num_samples, 100 * acc))
    return acc

def train(model, optimizer, epochs=1, overfit=False):
    """
    Inputs:
    - model: A PyTorch Module giving the model to train.
    - optimizer: An Optimizer object we will use to train the model
    - epochs: (Optional) A Python integer giving the number of epochs to train for
    Returns: the model with the best validation accuracy; also prints model accuracies during training.
    """
    model = model.to(device=device)  # move the model parameters to CPU/GPU
    best_acc = 0
    best_model = model
    for e in range(epochs):
        for t, (x, y) in enumerate(loader_train):
            model.train()  # put model to training mode
            x = x.to(device=device, dtype=dtype)  # move to device, e.g. GPU
            y = y.to(device=device, dtype=torch.long)
            [N, C, H, W] = [*x.size()]

            # Per-class weights for the loss (kept uniform here); created on the active device
            weights = torch.tensor([1., 1., 1.], device=device)
            scores = model(x.view(N, 1, C, H, W))
            Loss = nn.CrossEntropyLoss(weight=weights)
            loss = Loss(scores, y)

            # Manual L2 regularization, added on top of the optimizer's weight_decay
            reg = torch.tensor(2.5e-2, device=device, dtype=dtype)
            l2_reg = torch.tensor(0., device=device, dtype=dtype)
            for param in model.parameters():
                l2_reg += torch.norm(param)
            loss += reg * l2_reg

            # Zero out all of the gradients for the variables which the optimizer
            # will update.
            optimizer.zero_grad()

            # This is the backwards pass: compute the gradient of the loss with
            # respect to each parameter of the model.
            loss.backward()

            # Actually update the parameters of the model using the gradients
            # computed by the backwards pass.
            optimizer.step()

            print('Iteration %d, loss = %.4f' % (t, loss.item()))
            if overfit:
                check_accuracy(loader_train, model, train=True)
            acc = check_accuracy(loader_val, model, val=True)
            if acc > best_acc:
                best_acc = acc
                best_model = copy.deepcopy(model)  # keep a snapshot of the best-performing weights
            print()
    return best_model

model1 = get_model()
optimizer = optim.Adam(model1.parameters(), lr=0.0001, weight_decay=1e-4)
best_model = train(model1, optimizer, epochs=10, overfit=True)

# Check test set
check_accuracy(loader_test, best_model)
```
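Because the class distribution is skewed (which is why the weighted sampler is used above), overall accuracy can be misleading. A small sketch of a per-class breakdown on the test loader, reusing the objects defined above, might look like this:

```
from collections import Counter

def per_class_accuracy(loader, model):
    """Count correct predictions separately for each class label."""
    correct, total = Counter(), Counter()
    model.eval()
    with torch.no_grad():
        for x, y in loader:
            x = x.to(device=device, dtype=dtype)
            y = y.to(device=device, dtype=torch.long)
            N, C, H, W = x.size()
            preds = model(x.view(N, 1, C, H, W)).argmax(dim=1)
            for label, pred in zip(y.tolist(), preds.tolist()):
                total[label] += 1
                correct[label] += int(label == pred)
    for label in sorted(total):
        print(f"class {label}: {correct[label]}/{total[label]} correct")

per_class_accuracy(loader_test, best_model)
```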
github_jupyter
# Tutorial: Computing with shapes of landmarks in Kendall shape spaces Lead author: Nina Miolane. In this tutorial, we show how to use geomstats to perform a shape data analysis. Specifically, we aim to study the difference between two groups of data: - optical nerve heads that correspond to normal eyes, - optical nerve heads that correspond to glaucoma eyes. We wish to investigate if there is a difference in these two groups, and if this difference is a difference in sizes of the optical nerve heads, or a difference in shapes (where the size has been quotiented out). <img src="figures/optic_nerves.png" /> ## Set up ``` import os import sys import warnings sys.path.append(os.path.dirname(os.getcwd())) warnings.filterwarnings('ignore') %matplotlib inline import matplotlib.colors as colors import matplotlib.patches as mpatches import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from mpl_toolkits.mplot3d.art3d import Poly3DCollection import geomstats.backend as gs import geomstats.datasets.utils as data_utils from geomstats.geometry.pre_shape import PreShapeSpace, KendallShapeMetric ``` We import the dataset of the optical nerve heads from 22 images of Rhesus monkeys’ eyes (11 monkeys), available in [[PE2015]](#References). For each monkey, an experimental glaucoma was introduced in one eye, while the second eye was kept as control. One seeks to observe differences between the glaucoma and the control eyes. On each image, 5 anatomical landmarks were recorded: - 1st landmark: superior aspect of the retina, - 2nd landmark: side of the retina closest to the temporal bone of the skull, - 3rd landmark: nose side of the retina, - 4th landmark: inferior point, - 5th landmark: optical nerve head deepest point. Label 0 refers to a normal eye, and Label 1 to an eye with glaucoma. ``` nerves, labels, monkeys = data_utils.load_optical_nerves() print(nerves.shape) print(labels) print(monkeys) ``` We extract the landmarks' sets corresponding to the two eyes' nerves of the first monkey, with their corresponding labels. ``` two_nerves = nerves[monkeys==0] print(two_nerves.shape) two_labels = labels[monkeys==0] print(two_labels) label_to_str = {0: 'Normal nerve', 1: 'Glaucoma nerve'} label_to_color = {0: (102/255, 178/255, 255/255, 1.), 1: (255/255, 178/255, 102/255, 1.)} fig = plt.figure() ax = Axes3D(fig) ax.set_xlim((2000, 4000)) ax.set_ylim((1000, 5000)) ax.set_zlim((-600, 200)) for nerve, label in zip(two_nerves, two_labels): x = nerve[:, 0] y = nerve[:, 1] z = nerve[:, 2] verts = [list(zip(x,y,z))] poly = Poly3DCollection(verts, alpha=0.5) color = label_to_color[int(label)] poly.set_color(colors.rgb2hex(color)) poly.set_edgecolor('k') ax.add_collection3d(poly) patch_0 = mpatches.Patch(color=label_to_color[0], label=label_to_str[0], alpha=0.5) patch_1 = mpatches.Patch(color=label_to_color[1], label=label_to_str[1], alpha=0.5) plt.legend(handles=[patch_0, patch_1], prop={'size': 14}) plt.show() ``` We first try to detect if there are two groups of optical nerve heads, based on the 3D coordinates of the landmarks sets. ``` from geomstats.geometry.euclidean import EuclideanMetric nerves_vec = nerves.reshape(22, -1) eucl_metric = EuclideanMetric(nerves_vec.shape[-1]) eucl_dist = eucl_metric.dist_pairwise(nerves_vec) plt.figure() plt.imshow(eucl_dist); ``` We do not see any two clear clusters. 
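To make this visual impression more concrete, an optional check (a sketch reusing `eucl_dist` and `labels` from the cells above, assuming the default NumPy backend of geomstats) is to compare the average pairwise distance within each label group to the average distance between groups:

```
import numpy as np

same = labels[:, None] == labels[None, :]
off_diag = ~np.eye(len(labels), dtype=bool)

print('Mean within-group distance: ', float(np.mean(eucl_dist[same & off_diag])))
print('Mean between-group distance:', float(np.mean(eucl_dist[~same])))
```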
We want to investigate if there is a difference between these two groups of shapes - normal nerve versus glaucoma nerve - or if the main difference is merely relative to the global size of the landmark sets.

```
m_ambient = 3
k_landmarks = 5

preshape = PreShapeSpace(m_ambient=m_ambient, k_landmarks=k_landmarks)
matrices_metric = preshape.embedding_metric

sizes = matrices_metric.norm(preshape.center(nerves))

plt.figure(figsize=(6, 4))

for label, col in label_to_color.items():
    label_sizes = sizes[labels==label]
    plt.hist(label_sizes, color=col, label=label_to_str[label], alpha=0.5, bins=10)
    plt.axvline(gs.mean(label_sizes), color=col)
plt.legend(fontsize=14)
plt.title('Sizes of optical nerves', fontsize=14);
```

The vertical lines represent the sample mean of each group (normal/glaucoma).

```
plt.figure(figsize=(6, 4))

plt.hist(sizes[labels==1] - sizes[labels==0], alpha=0.5)
plt.axvline(0, color='black')
plt.title('Difference in size of optical nerve between glaucoma and normal eyes', fontsize=14);
```

We perform a hypothesis test, testing if the two samples of sizes have the same average. We use the t-test for related samples, since the sample elements are paired: two eyes for each monkey.

```
from scipy import stats

signif_level = 0.05

tstat, pvalue = stats.ttest_rel(sizes[labels==0], sizes[labels==1])
print(pvalue < signif_level)
```

There is a significant difference in optical nerve head size between the glaucoma and the normal eyes.

We now want to investigate if there is also a difference in shapes, where the size component has been quotiented out.

We project the data to the Kendall pre-shape space, which:
- centers the nerve landmark sets so that they share the same barycenter,
- normalizes the sizes of the landmark sets to 1.

```
nerves_preshape = preshape.projection(nerves)
print(nerves_preshape.shape)
print(preshape.belongs(nerves_preshape))
print(gs.isclose(matrices_metric.norm(nerves_preshape), 1.))
```

In order to quotient out the 3D orientation component, we align the landmark sets in the preshape space.

```
base_point = nerves_preshape[0]

nerves_shape = preshape.align(point=nerves_preshape, base_point=base_point)
```

The Kendall metric is a Riemannian metric that takes this alignment into account. It corresponds to the metric of the Kendall shape space, which is the manifold defined as the quotient of the preshape space by the action of rotations in m_ambient dimensions, here 3 dimensions.

```
kendall_metric = KendallShapeMetric(m_ambient=m_ambient, k_landmarks=k_landmarks)
```

We can use it to perform a tangent PCA in the Kendall shape space, and determine if we see a difference in the shapes of the optical nerves.

```
from geomstats.learning.pca import TangentPCA

tpca = TangentPCA(kendall_metric)
tpca.fit(nerves_shape)

plt.plot(tpca.explained_variance_ratio_)
plt.xlabel("Number of principal tangent components", size=14)
plt.ylabel("Fraction of explained variance", size=14);
```

Two principal components already describe around 60% of the variance. We plot the data projected in the tangent space defined by these two principal components.
```
X = tpca.transform(nerves_shape)

plt.figure(figsize=(12, 12))

for label, col in label_to_color.items():
    mask = labels == label
    plt.scatter(X[mask, 0], X[mask, 1], color=col, s=100, label=label_to_str[label]);
plt.legend(fontsize=14);

for label, x, y in zip(monkeys, X[:, 0], X[:, 1]):
    plt.annotate(
        label,
        xy=(x, y), xytext=(-20, 20),
        textcoords='offset points', ha='right', va='bottom',
        bbox=dict(boxstyle='round,pad=0.5', fc='white', alpha=0.5),
        arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))

plt.show()
```

The numbers annotating the points are the monkeys' indices. In contrast to the above study focusing on the optical nerves' sizes, visual inspection does not reveal any clusters between the glaucoma and normal optical nerves' shapes. We also do not see any obvious pattern between the two optical nerves of the same monkey. This shows that the difference between the optical nerve heads mainly resides in the overall sizes of the optical nerves.

```
dist_pairwise = kendall_metric.dist_pairwise(nerves_shape)

print(dist_pairwise.shape)

plt.figure()
plt.imshow(dist_pairwise);
```

We try an agglomerative hierarchical clustering to investigate if we can cluster in the Kendall shape space.

```
from geomstats.learning.agglomerative_hierarchical_clustering import AgglomerativeHierarchicalClustering

clustering = AgglomerativeHierarchicalClustering(distance='precomputed', n_clusters=2)

clustering.fit(dist_pairwise)
predicted_labels = clustering.labels_

print('True labels:', labels)
print('Predicted labels:', predicted_labels)

accuracy = gs.sum(labels==predicted_labels) / len(labels)
print(f'Accuracy: {accuracy:.2f}')
```

The accuracy is barely above that of a random classifier, which would assign 0 or 1 with probability 0.5 to each of the shapes. This confirms that the difference between the two groups is mostly due to the size of the landmark sets and not to their shapes.

## References

.. [PE2015] Patrangenaru and L. Ellingson. Nonparametric Statistics on Manifolds and Their Applications to Object Data, 2015. https://doi.org/10.1201/b18969
github_jupyter