``` %load_ext autoreload %autoreload 2 import numpy as np import random import torch from collections import defaultdict from scipy.sparse import csr_matrix from sklearn.cluster import AgglomerativeClustering from tqdm.auto import tqdm from src.data.filesystem import fopen from src.data.ancestry import load_train_test from src.data.prepare import normalize from src.models.utils import add_padding, remove_padding, build_token_idx_maps, convert_names_to_model_inputs, get_best_matches ``` ### Configure ``` sample_size = 0 max_closure_size = 10000 max_distance = 0.22 cluster_distance_threshold = 0.155 super_cluster_distance_threshold = 0.205 num_candidates = 1000 eps = 0.000001 model_filename = '../data/models/anc-triplet-bilstm-100-512-40-05.pth' # process_nicknames = True # werelate_names_filename = 'givenname_similar_names.werelate.20210414.tsv' # nicknames_filename = '../data/models/givenname_nicknames.txt' # name_freqs_filename = 'given-final.normal.txt' # clusters_filename = 'givenname_clusters.tsv' # super_clusters_filename = 'givenname_super_clusters.tsv' werelate_names_filename = '../data/external/surname_similar_names.werelate.20210414.tsv' nicknames_filename = '' name_freqs_filename = '../data/external/surname-final.normal.txt' clusters_filename = '../data/models/ancestry_surname_clusters-20211028.tsv' super_clusters_filename = '../data/models/ancestry_surname_super_clusters-20211028.tsv' is_surname = True ``` ### Read WeRelate names into all_names Later, we'll want to read frequent FS names into all_names ``` # TODO rewrite this in just a few lines using pandas def load_werelate_names(path, is_surname): name_variants = defaultdict(set) with fopen(path, mode="r", encoding="utf-8") as f: is_header = True for line in f: if is_header: is_header = False continue fields = line.rstrip().split("\t") # normalize should only return a single name piece, but loop just in case for name_piece in normalize(fields[0], is_surname): confirmed_variants = fields[1].strip().split(" ") if len(fields) >= 2 else [] computer_variants = fields[2].strip().split(" ") if len(fields) == 3 else [] variants = confirmed_variants + computer_variants for variant in variants: for variant_piece in normalize(variant, is_surname): name_variants[name_piece].add(variant_piece) return name_variants all_names = set() name_variants = load_werelate_names(werelate_names_filename, is_surname) print(len(name_variants)) for k, v in name_variants.items(): all_names.add(add_padding(k)) all_names.update(add_padding(variant) for variant in v) print(len(all_names), next(iter(all_names))) name_variants = None ``` ### Read nicknames and remove from names ``` def load_nicknames(path): nicknames = defaultdict(set) with fopen(path, mode="r", encoding="utf-8") as f: for line in f: names = line.rstrip().split(" ") # normalize should only return a single name piece, but loop just in case for name_piece in normalize(names[0], False): orig_name = add_padding(name_piece) for nickname in names[1:]: for nickname_piece in normalize(nickname, False): nicknames[add_padding(nickname_piece)].add(orig_name) return nicknames name_nicks = defaultdict(set) if not is_surname: nick_names = load_nicknames(nicknames_filename) for nick, names in nick_names.items(): for name in names: name_nicks[name].add(nick) print(next(iter(nick_names.items())), "nick_names", len(nick_names.keys()), "name_nicks", len(name_nicks.keys())) all_names -= set(nickname for nickname in nick_names.keys()) print(len(all_names)) ``` ### Map names to ids ``` def map_names_to_ids(names): 
ids = range(len(names)) return dict(zip(names, ids)), dict(zip(ids, names)) name_ids, id_names = map_names_to_ids(all_names) print(next(iter(name_ids.items())), next(iter(id_names.items()))) ``` ### Read name frequencies ``` # TODO rewrite this using pandas too def load_name_freqs(path, is_surname): name_freqs = defaultdict(int) with fopen(path, mode="r", encoding="utf-8") as f: for line in f: fields = line.rstrip().split("\t") for name_piece in normalize(fields[0], is_surname): name_freqs[name_piece] = int(fields[1]) return name_freqs name_freqs = load_name_freqs(name_freqs_filename, is_surname) # keep only entries in all_names name_freqs = dict((add_padding(k),v) for k,v in name_freqs.items() if add_padding(k) in all_names) print(len(name_freqs), next(iter(name_freqs.items()))) ``` ### Load model ``` model = torch.load(model_filename) ``` ### Encode names ``` MAX_NAME_LENGTH=30 char_to_idx_map, idx_to_char_map = build_token_idx_maps() ``` #### Take a sample because encoded names require a lot of memory ``` if sample_size <= 0 or sample_size >= len(all_names): names_sample = np.array(list(all_names)) else: names_sample = np.array(random.sample(all_names, sample_size)) print(names_sample.shape) ``` #### Compute encodings ``` # Get embeddings names_tensor, _ = convert_names_to_model_inputs(names_sample, char_to_idx_map, MAX_NAME_LENGTH) # Get encodings for the names from the encoder # TODO why do I need to encode in chunks? chunk_size = 10000 nps = [] for begin in tqdm(range(0, len(names_tensor), chunk_size)): nps.append(model(names_tensor[begin:begin+chunk_size], just_encoder=True).detach().numpy()) names_encoded = np.concatenate(nps, axis=0) nps = None names_encoded.shape ``` ### Compute distances ``` name_candidates = get_best_matches(names_encoded, names_encoded, names_sample, num_candidates=num_candidates, metric='euclidean') # what's going on here? 
distances = np.hstack((np.repeat(names_sample, num_candidates)[:, np.newaxis], name_candidates.reshape(-1,2))) # remove distances > max_distance distances = distances[distances[:, -1].astype('float') <= max_distance] # sort distances = distances[distances[:, -1].astype('float').argsort()] print(distances.shape) name_candidates = None ``` ### Compute closures ``` # iterate over all distances, create closures and save scores next_closure = 0 closure_ids = {} id_closure = {} row_ixs = [] col_ixs = [] dists = [] max_size = 0 for row in tqdm(distances): name1 = row[0] name2 = row[1] id1 = name_ids[name1] id2 = name_ids[name2] # each distance is in distances twice if id1 > id2: continue distance = max(eps, float(row[2])) closure1 = id_closure.get(id1) closure2 = id_closure.get(id2) if closure1 is None and closure2 is not None: id1, id2 = id2, id1 name1, name2 = name2, name1 closure1, closure2 = closure2, closure1 # add to distance matrix row_ixs.append(id1) col_ixs.append(id2) dists.append(distance) # skip if names are the same if id1 == id2: continue row_ixs.append(id2) col_ixs.append(id1) dists.append(distance) # create closures if closure1 is None: # if closure1 is None, then closure2 must be none also due to the above # so create a new closure with id1 and id2 closure1 = next_closure next_closure += 1 id_closure[id1] = closure1 id_closure[id2] = closure1 closure_ids[closure1] = [id1, id2] next_closure += 1 elif closure2 is None: # put id2 into id1's closure id_closure[id2] = closure1 closure_ids[closure1].append(id2) elif closure1 != closure2 and len(closure_ids[closure1]) + len(closure_ids[closure2]) <= max_closure_size: # move all ids in closure2 into closure1 for id in closure_ids[closure2]: id_closure[id] = closure1 closure_ids[closure1].append(id) del closure_ids[closure2] if len(closure_ids[closure1]) > max_size: max_size = len(closure_ids[closure1]) # create distances matrix dist_matrix = csr_matrix((dists, (row_ixs, col_ixs))) print("max closure_size", max_size) print("number of closures", len(closure_ids), "number of names enclosed", len(id_closure)) ``` ### Compute clusters ``` def compute_clusters(closure_ids, id_names, dist_matrix, linkage, distance_threshold, eps, max_dist): cluster_names = defaultdict(set) name_cluster = {} for closure, ids in tqdm(closure_ids.items()): clusterer = AgglomerativeClustering(n_clusters=None, affinity='precomputed', linkage=linkage, distance_threshold=distance_threshold) X = dist_matrix[ids][:, ids].todense() X[X < eps] = max_dist labels = clusterer.fit_predict(X) for id, label in zip(ids, labels): name = id_names[id] cluster = f'{closure}_{label}' cluster_names[cluster].add(name) name_cluster[name] = cluster return cluster_names, name_cluster # try ward, average, single cluster_linkage = 'average' max_dist = 10.0 cluster_names, name_cluster = compute_clusters(closure_ids, id_names, dist_matrix, cluster_linkage, cluster_distance_threshold, eps, max_dist) print(len(cluster_names)) ``` #### Add unclustered names as singleton clusters ``` def add_singleton_names(cluster_names, name_cluster, names_sample): for ix, name in enumerate(names_sample): if name not in name_cluster: cluster = f'{ix}' cluster_names[cluster].add(name) name_cluster[name] = cluster return cluster_names, name_cluster cluster_names, name_cluster = add_singleton_names(cluster_names, name_cluster, names_sample) print(len(cluster_names)) ``` ### Eval cluster P/R over Ancestry test data ``` train, test = load_train_test("../data/raw/records25k_data_train.csv", 
"../data/raw/records25k_data_test.csv") _, _, candidates_train = train input_names_test, weighted_relevant_names_test, candidates_test = test all_candidates = np.concatenate((candidates_train, candidates_test)) def get_precision_recall(names_sample, all_candidates, input_names_test, weighted_relevant_names_test, cluster_names, name_cluster): names_sample_set = set(names_sample.tolist()) all_candidates_set = set(all_candidates.tolist()) precisions = [] recalls = [] for input_name, weighted_relevant_names in zip(input_names_test, weighted_relevant_names_test): if input_name not in names_sample_set: continue cluster_id = name_cluster[input_name] names_in_cluster = cluster_names[cluster_id] & all_candidates_set found_recall = 0.0 total_recall = 0.0 found_count = 0 for name, weight, _ in weighted_relevant_names: if name in names_sample_set: total_recall += weight if name in names_in_cluster: found_recall += weight found_count += 1 if total_recall == 0.0: continue precision = found_count / len(names_in_cluster) if len(names_in_cluster) > 0 else 1.0 recall = found_recall / total_recall precisions.append(precision) recalls.append(recall) avg_precision = sum(precisions) / len(precisions) avg_recall = sum(recalls) / len(recalls) return avg_precision, avg_recall, len(precisions) precision, recall, total = get_precision_recall(names_sample, all_candidates, input_names_test, weighted_relevant_names_test, cluster_names, name_cluster) print("Total=", total, " Precision=", precision, " Recall=", recall) ``` ### Write clusters ``` def write_clusters(path, cluster_names, name_freqs, name_nicks): cluster_id_name_map = {} with fopen(path, mode="w", encoding="utf-8") as f: for cluster_id, names in cluster_names.items(): # get most-frequent name cluster_name = max(names, key=(lambda name: name_freqs.get(name, 0))) # map cluster id to cluster name cluster_id_name_map[cluster_id] = cluster_name # add nicknames nicknames = set() if name_nicks: for name in names: if name in name_nicks: nicknames.update(name_nicks[name]) # remove padding cluster_name = remove_padding(cluster_name) names = [remove_padding(name) for name in names | nicknames] # write cluster f.write(f'{cluster_name}\t{" ".join(names)}\n') return cluster_id_name_map cluster_id_name_map = write_clusters(clusters_filename, cluster_names, name_freqs, name_nicks) ``` ### Create super-clusters ``` super_cluster_names, name_super_cluster = compute_clusters(closure_ids, id_names, dist_matrix, cluster_linkage, super_cluster_distance_threshold, eps, max_dist) print(len(super_cluster_names)) super_cluster_names, name_super_cluster = add_singleton_names(super_cluster_names, name_super_cluster, names_sample) print(len(super_cluster_names)) precision, recall, total = get_precision_recall(names_sample, all_candidates, input_names_test, weighted_relevant_names_test, super_cluster_names, name_super_cluster) print("Total=", total, " Precision=", precision, " Recall=", recall) # get cluster names for each name in super cluster super_cluster_clusters = {id: set([cluster_id_name_map[name_cluster[name]] for name in names]) for id, names in super_cluster_names.items()} ``` ### Write super-clusters ``` _ = write_clusters(super_clusters_filename, super_cluster_clusters, name_freqs, None) ```
##### Copyright 2019 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Load images with tf.data <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/beta/tutorials/load_data/images"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/tutorials/load_data/images.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/r2/tutorials/load_data/images.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/r2/tutorials/load_data/images.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> This tutorial provides a simple example of how to load an image dataset using `tf.data`. The dataset used in this example is distributed as directories of images, with one class of image per directory. ## Setup ``` from __future__ import absolute_import, division, print_function, unicode_literals !pip install tensorflow==2.0.0-beta1 import tensorflow as tf AUTOTUNE = tf.data.experimental.AUTOTUNE ``` ## Download and inspect the dataset ### Retrieve the images Before you start any training, you will need a set of images to teach the network about the new classes you want to recognize. 
You have already created an archive of creative-commons licensed flower photos to use initially: ``` import pathlib data_root_orig = tf.keras.utils.get_file(origin='https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz', fname='flower_photos', untar=True) data_root = pathlib.Path(data_root_orig) print(data_root) ``` After downloading 218MB, you should now have a copy of the flower photos available: ``` for item in data_root.iterdir(): print(item) import random all_image_paths = list(data_root.glob('*/*')) all_image_paths = [str(path) for path in all_image_paths] random.shuffle(all_image_paths) image_count = len(all_image_paths) image_count all_image_paths[:10] ``` ### Inspect the images Now let's have a quick look at a couple of the images, so you know what you are dealing with: ``` import os attributions = (data_root/"LICENSE.txt").open(encoding='utf-8').readlines()[4:] attributions = [line.split(' CC-BY') for line in attributions] attributions = dict(attributions) import IPython.display as display def caption_image(image_path): image_rel = pathlib.Path(image_path).relative_to(data_root) return "Image (CC BY 2.0) " + ' - '.join(attributions[str(image_rel)].split(' - ')[:-1]) for n in range(3): image_path = random.choice(all_image_paths) display.display(display.Image(image_path)) print(caption_image(image_path)) print() ``` ### Determine the label for each image List the available labels: ``` label_names = sorted(item.name for item in data_root.glob('*/') if item.is_dir()) label_names ``` Assign an index to each label: ``` label_to_index = dict((name, index) for index, name in enumerate(label_names)) label_to_index ``` Create a list of every file, and its label index: ``` all_image_labels = [label_to_index[pathlib.Path(path).parent.name] for path in all_image_paths] print("First 10 labels indices: ", all_image_labels[:10]) ``` ### Load and format the images TensorFlow includes all the tools you need to load and process images: ``` img_path = all_image_paths[0] img_path ``` Here is the raw data: ``` img_raw = tf.io.read_file(img_path) print(repr(img_raw)[:100]+"...") ``` Decode it into an image tensor: ``` img_tensor = tf.image.decode_image(img_raw) print(img_tensor.shape) print(img_tensor.dtype) ``` Resize it for your model: ``` img_final = tf.image.resize(img_tensor, [192, 192]) img_final = img_final/255.0 print(img_final.shape) print(img_final.numpy().min()) print(img_final.numpy().max()) ``` Wrap up these up in simple functions for later. ``` def preprocess_image(image): image = tf.image.decode_jpeg(image, channels=3) image = tf.image.resize(image, [192, 192]) image /= 255.0 # normalize to [0,1] range return image def load_and_preprocess_image(path): image = tf.io.read_file(path) return preprocess_image(image) import matplotlib.pyplot as plt image_path = all_image_paths[0] label = all_image_labels[0] plt.imshow(load_and_preprocess_image(img_path)) plt.grid(False) plt.xlabel(caption_image(img_path)) plt.title(label_names[label].title()) print() ``` ## Build a `tf.data.Dataset` ### A dataset of images The easiest way to build a `tf.data.Dataset` is using the `from_tensor_slices` method. Slicing the array of strings, results in a dataset of strings: ``` path_ds = tf.data.Dataset.from_tensor_slices(all_image_paths) ``` The `shapes` and `types` describe the content of each item in the dataset. 
In this case it is a set of scalar binary-strings ``` print(path_ds) ``` Now create a new dataset that loads and formats images on the fly by mapping `preprocess_image` over the dataset of paths. ``` image_ds = path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE) import matplotlib.pyplot as plt plt.figure(figsize=(8,8)) for n, image in enumerate(image_ds.take(4)): plt.subplot(2,2,n+1) plt.imshow(image) plt.grid(False) plt.xticks([]) plt.yticks([]) plt.xlabel(caption_image(all_image_paths[n])) plt.show() ``` ### A dataset of `(image, label)` pairs Using the same `from_tensor_slices` method you can build a dataset of labels: ``` label_ds = tf.data.Dataset.from_tensor_slices(tf.cast(all_image_labels, tf.int64)) for label in label_ds.take(10): print(label_names[label.numpy()]) ``` Since the datasets are in the same order you can just zip them together to get a dataset of `(image, label)` pairs: ``` image_label_ds = tf.data.Dataset.zip((image_ds, label_ds)) ``` The new dataset's `shapes` and `types` are tuples of shapes and types as well, describing each field: ``` print(image_label_ds) ``` Note: When you have arrays like `all_image_labels` and `all_image_paths` an alternative to `tf.data.dataset.Dataset.zip` is to slice the pair of arrays. ``` ds = tf.data.Dataset.from_tensor_slices((all_image_paths, all_image_labels)) # The tuples are unpacked into the positional arguments of the mapped function def load_and_preprocess_from_path_label(path, label): return load_and_preprocess_image(path), label image_label_ds = ds.map(load_and_preprocess_from_path_label) image_label_ds ``` ### Basic methods for training To train a model with this dataset you will want the data: * To be well shuffled. * To be batched. * To repeat forever. * Batches to be available as soon as possible. These features can be easily added using the `tf.data` api. ``` BATCH_SIZE = 32 # Setting a shuffle buffer size as large as the dataset ensures that the data is # completely shuffled. ds = image_label_ds.shuffle(buffer_size=image_count) ds = ds.repeat() ds = ds.batch(BATCH_SIZE) # `prefetch` lets the dataset fetch batches in the background while the model is training. ds = ds.prefetch(buffer_size=AUTOTUNE) ds ``` There are a few things to note here: 1. The order is important. * A `.shuffle` after a `.repeat` would shuffle items across epoch boundaries (some items will be seen twice before others are seen at all). * A `.shuffle` after a `.batch` would shuffle the order of the batches, but not shuffle the items across batches. 1. You use a `buffer_size` the same size as the dataset for a full shuffle. Up to the dataset size, large values provide better randomization, but use more memory. 1. The shuffle buffer is filled before any elements are pulled from it. So a large `buffer_size` may cause a delay when your `Dataset` is starting. 1. The shuffeled dataset doesn't report the end of a dataset until the shuffle-buffer is completely empty. The `Dataset` is restarted by `.repeat`, causing another wait for the shuffle-buffer to be filled. This last point can be addressed by using the `tf.data.Dataset.apply` method with the fused `tf.data.experimental.shuffle_and_repeat` function: ``` ds = image_label_ds.apply( tf.data.experimental.shuffle_and_repeat(buffer_size=image_count)) ds = ds.batch(BATCH_SIZE) ds = ds.prefetch(buffer_size=AUTOTUNE) ds ``` ### Pipe the dataset to a model Fetch a copy of MobileNet v2 from `tf.keras.applications`. This will be used for a simple transfer learning example. 
Set the MobileNet weights to be non-trainable: ``` mobile_net = tf.keras.applications.MobileNetV2(input_shape=(192, 192, 3), include_top=False) mobile_net.trainable=False ``` This model expects its input to be normalized to the `[-1,1]` range: ``` help(keras_applications.mobilenet_v2.preprocess_input) ``` <pre> ... This function applies the "Inception" preprocessing which converts the RGB values from [0, 255] to [-1, 1] ... </pre> Before you pass the input to the MobilNet model, you need to convert it from a range of `[0,1]` to `[-1,1]`: ``` def change_range(image,label): return 2*image-1, label keras_ds = ds.map(change_range) ``` The MobileNet returns a `6x6` spatial grid of features for each image. Pass it a batch of images to see: ``` # The dataset may take a few seconds to start, as it fills its shuffle buffer. image_batch, label_batch = next(iter(keras_ds)) feature_map_batch = mobile_net(image_batch) print(feature_map_batch.shape) ``` Build a model wrapped around MobileNet and use `tf.keras.layers.GlobalAveragePooling2D` to average over those space dimensions before the output `tf.keras.layers.Dense` layer: ``` model = tf.keras.Sequential([ mobile_net, tf.keras.layers.GlobalAveragePooling2D(), tf.keras.layers.Dense(len(label_names))]) ``` Now it produces outputs of the expected shape: ``` logit_batch = model(image_batch).numpy() print("min logit:", logit_batch.min()) print("max logit:", logit_batch.max()) print() print("Shape:", logit_batch.shape) ``` Compile the model to describe the training procedure: ``` model.compile(optimizer=tf.keras.optimizers.Adam(), loss='sparse_categorical_crossentropy', metrics=["accuracy"]) ``` There are 2 trainable variables - the Dense `weights` and `bias`: ``` len(model.trainable_variables) model.summary() ``` You are ready to train the model. Note that for demonstration purposes you will only run 3 steps per epoch, but normally you would specify the real number of steps, as defined below, before passing it to `model.fit()`: ``` steps_per_epoch=tf.math.ceil(len(all_image_paths)/BATCH_SIZE).numpy() steps_per_epoch model.fit(ds, epochs=1, steps_per_epoch=3) ``` ## Performance Note: This section just shows a couple of easy tricks that may help performance. For an in depth guide see [Input Pipeline Performance](https://www.tensorflow.org/guide/performance/datasets). The simple pipeline used above reads each file individually, on each epoch. This is fine for local training on CPU, but may not be sufficient for GPU training and is totally inappropriate for any sort of distributed training. To investigate, first build a simple function to check the performance of our datasets: ``` import time default_timeit_steps = 2*steps_per_epoch+1 def timeit(ds, steps=default_timeit_steps): overall_start = time.time() # Fetch a single batch to prime the pipeline (fill the shuffle buffer), # before starting the timer it = iter(ds.take(steps+1)) next(it) start = time.time() for i,(images,labels) in enumerate(it): if i%10 == 0: print('.',end='') print() end = time.time() duration = end-start print("{} batches: {} s".format(steps, duration)) print("{:0.5f} Images/s".format(BATCH_SIZE*steps/duration)) print("Total time: {}s".format(end-overall_start)) ``` The performance of the current dataset is: ``` ds = image_label_ds.apply( tf.data.experimental.shuffle_and_repeat(buffer_size=image_count)) ds = ds.batch(BATCH_SIZE).prefetch(buffer_size=AUTOTUNE) ds timeit(ds) ``` ### Cache Use `tf.data.Dataset.cache` to easily cache calculations across epochs. 
This is very efficient, especially when the data fits in memory. Here the images are cached, after being pre-precessed (decoded and resized): ``` ds = image_label_ds.cache() ds = ds.apply( tf.data.experimental.shuffle_and_repeat(buffer_size=image_count)) ds = ds.batch(BATCH_SIZE).prefetch(buffer_size=AUTOTUNE) ds timeit(ds) ``` One disadvantage to using an in memory cache is that the cache must be rebuilt on each run, giving the same startup delay each time the dataset is started: ``` timeit(ds) ``` If the data doesn't fit in memory, use a cache file: ``` ds = image_label_ds.cache(filename='./cache.tf-data') ds = ds.apply( tf.data.experimental.shuffle_and_repeat(buffer_size=image_count)) ds = ds.batch(BATCH_SIZE).prefetch(1) ds timeit(ds) ``` The cache file also has the advantage that it can be used to quickly restart the dataset without rebuilding the cache. Note how much faster it is the second time: ``` timeit(ds) ``` ### TFRecord File #### Raw image data TFRecord files are a simple format to store a sequence of binary blobs. By packing multiple examples into the same file, TensorFlow is able to read multiple examples at once, which is especially important for performance when using a remote storage service such as GCS. First, build a TFRecord file from the raw image data: ``` image_ds = tf.data.Dataset.from_tensor_slices(all_image_paths).map(tf.io.read_file) tfrec = tf.data.experimental.TFRecordWriter('images.tfrec') tfrec.write(image_ds) ``` Next, build a dataset that reads from the TFRecord file and decodes/reformats the images using the `preprocess_image` function you defined earlier: ``` image_ds = tf.data.TFRecordDataset('images.tfrec').map(preprocess_image) ``` Zip that dataset with the labels dataset you defined earlier to get the expected `(image,label)` pairs: ``` ds = tf.data.Dataset.zip((image_ds, label_ds)) ds = ds.apply( tf.data.experimental.shuffle_and_repeat(buffer_size=image_count)) ds=ds.batch(BATCH_SIZE).prefetch(AUTOTUNE) ds timeit(ds) ``` This is slower than the `cache` version because you have not cached the preprocessing. #### Serialized Tensors To save some preprocessing to the TFRecord file, first make a dataset of the processed images, as before: ``` paths_ds = tf.data.Dataset.from_tensor_slices(all_image_paths) image_ds = paths_ds.map(load_and_preprocess_image) image_ds ``` Now instead of a dataset of `.jpeg` strings, you have a dataset of tensors. To serialize this to a TFRecord file you first convert the dataset of tensors to a dataset of strings: ``` ds = image_ds.map(tf.io.serialize_tensor) ds tfrec = tf.data.experimental.TFRecordWriter('images.tfrec') tfrec.write(ds) ``` With the preprocessing cached, data can be loaded from the TFrecord file quite efficiently - just remember to de-serialize tensor before using it: ``` ds = tf.data.TFRecordDataset('images.tfrec') def parse(x): result = tf.io.parse_tensor(x, out_type=tf.float32) result = tf.reshape(result, [192, 192, 3]) return result ds = ds.map(parse, num_parallel_calls=AUTOTUNE) ds ``` Now, add the labels and apply the same standard operations, as before: ``` ds = tf.data.Dataset.zip((ds, label_ds)) ds = ds.apply( tf.data.experimental.shuffle_and_repeat(buffer_size=image_count)) ds=ds.batch(BATCH_SIZE).prefetch(AUTOTUNE) ds timeit(ds) ```
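The serialized-tensor pipeline above keeps the labels in a separate dataset and zips them back in at the end. As a hedged variation that is not part of the original tutorial, the same data could instead be stored as self-contained `tf.train.Example` records, so each TFRecord entry carries its image and its label together; the file name `images_with_labels.tfrec` is just an illustrative choice, and the snippet reuses `all_image_paths`, `all_image_labels` and `load_and_preprocess_image` from above.

```python
# Sketch (assumption, not from the tutorial): write (image, label) pairs into one
# TFRecord file using tf.train.Example, then parse them back in a single map step.
def to_example(image_tensor, label):
    image_bytes = tf.io.serialize_tensor(image_tensor).numpy()
    feature = {
        'image': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image_bytes])),
        'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),
    }
    return tf.train.Example(features=tf.train.Features(feature=feature))

with tf.io.TFRecordWriter('images_with_labels.tfrec') as writer:
    for path, label in zip(all_image_paths, all_image_labels):
        image = load_and_preprocess_image(path)
        writer.write(to_example(image, label).SerializeToString())

def parse_example(serialized):
    parsed = tf.io.parse_single_example(serialized, {
        'image': tf.io.FixedLenFeature([], tf.string),
        'label': tf.io.FixedLenFeature([], tf.int64),
    })
    image = tf.io.parse_tensor(parsed['image'], out_type=tf.float32)
    image = tf.reshape(image, [192, 192, 3])
    return image, parsed['label']

ds = tf.data.TFRecordDataset('images_with_labels.tfrec')
ds = ds.map(parse_example, num_parallel_calls=AUTOTUNE)
```

The parsed dataset can then be shuffled, repeated, batched and prefetched exactly as in the pipelines above, without a separate `label_ds` to zip in.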
``` #hide #skip ! [ -e /content ] && pip install -Uqq fastai # upgrade fastai on colab # default_exp losses # default_cls_lvl 3 #export from fastai.imports import * from fastai.torch_imports import * from fastai.torch_core import * from fastai.layers import * #hide from nbdev.showdoc import * ``` # Loss Functions > Custom fastai loss functions ``` F.binary_cross_entropy_with_logits(torch.randn(4,5), torch.randint(0, 2, (4,5)).float(), reduction='none') funcs_kwargs # export @log_args class BaseLoss(): "Same as `loss_cls`, but flattens input and target." activation=decodes=noops def __init__(self, loss_cls, *args, axis=-1, flatten=True, floatify=False, is_2d=True, **kwargs): store_attr("axis,flatten,floatify,is_2d") self.func = loss_cls(*args,**kwargs) functools.update_wrapper(self, self.func) def __repr__(self): return f"FlattenedLoss of {self.func}" @property def reduction(self): return self.func.reduction @reduction.setter def reduction(self, v): self.func.reduction = v def __call__(self, inp, targ, **kwargs): inp = inp .transpose(self.axis,-1).contiguous() targ = targ.transpose(self.axis,-1).contiguous() if self.floatify and targ.dtype!=torch.float16: targ = targ.float() if targ.dtype in [torch.int8, torch.int16, torch.int32]: targ = targ.long() if self.flatten: inp = inp.view(-1,inp.shape[-1]) if self.is_2d else inp.view(-1) return self.func.__call__(inp, targ.view(-1) if self.flatten else targ, **kwargs) ``` Wrapping a general loss function inside of `BaseLoss` provides extra functionalities to your loss functions: - flattens the tensors before trying to take the losses since it's more convenient (with a potential tranpose to put `axis` at the end) - a potential `activation` method that tells the library if there is an activation fused in the loss (useful for inference and methods such as `Learner.get_preds` or `Learner.predict`) - a potential <code>decodes</code> method that is used on predictions in inference (for instance, an argmax in classification) The `args` and `kwargs` will be passed to `loss_cls` during the initialization to instantiate a loss function. `axis` is put at the end for losses like softmax that are often performed on the last axis. If `floatify=True`, the `targs` will be converted to floats (useful for losses that only accept float targets like `BCEWithLogitsLoss`), and `is_2d` determines if we flatten while keeping the first dimension (batch size) or completely flatten the input. We want the first for losses like Cross Entropy, and the second for pretty much anything else. ``` # export @log_args @delegates() class CrossEntropyLossFlat(BaseLoss): "Same as `nn.CrossEntropyLoss`, but flattens input and target." y_int = True @use_kwargs_dict(keep=True, weight=None, ignore_index=-100, reduction='mean') def __init__(self, *args, axis=-1, **kwargs): super().__init__(nn.CrossEntropyLoss, *args, axis=axis, **kwargs) def decodes(self, x): return x.argmax(dim=self.axis) def activation(self, x): return F.softmax(x, dim=self.axis) tst = CrossEntropyLossFlat() output = torch.randn(32, 5, 10) target = torch.randint(0, 10, (32,5)) #nn.CrossEntropy would fail with those two tensors, but not our flattened version. 
_ = tst(output, target) test_fail(lambda x: nn.CrossEntropyLoss()(output,target)) #Associated activation is softmax test_eq(tst.activation(output), F.softmax(output, dim=-1)) #This loss function has a decodes which is argmax test_eq(tst.decodes(output), output.argmax(dim=-1)) #In a segmentation task, we want to take the softmax over the channel dimension tst = CrossEntropyLossFlat(axis=1) output = torch.randn(32, 5, 128, 128) target = torch.randint(0, 5, (32, 128, 128)) _ = tst(output, target) test_eq(tst.activation(output), F.softmax(output, dim=1)) test_eq(tst.decodes(output), output.argmax(dim=1)) # export @log_args @delegates() class BCEWithLogitsLossFlat(BaseLoss): "Same as `nn.BCEWithLogitsLoss`, but flattens input and target." @use_kwargs_dict(keep=True, weight=None, reduction='mean', pos_weight=None) def __init__(self, *args, axis=-1, floatify=True, thresh=0.5, **kwargs): super().__init__(nn.BCEWithLogitsLoss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs) self.thresh = thresh def decodes(self, x): return x>self.thresh def activation(self, x): return torch.sigmoid(x) tst = BCEWithLogitsLossFlat() output = torch.randn(32, 5, 10) target = torch.randn(32, 5, 10) #nn.BCEWithLogitsLoss would fail with those two tensors, but not our flattened version. _ = tst(output, target) test_fail(lambda x: nn.BCEWithLogitsLoss()(output,target)) output = torch.randn(32, 5) target = torch.randint(0,2,(32, 5)) #nn.BCEWithLogitsLoss would fail with int targets but not our flattened version. _ = tst(output, target) test_fail(lambda x: nn.BCEWithLogitsLoss()(output,target)) #Associated activation is sigmoid test_eq(tst.activation(output), torch.sigmoid(output)) # export @log_args(to_return=True) @use_kwargs_dict(weight=None, reduction='mean') def BCELossFlat(*args, axis=-1, floatify=True, **kwargs): "Same as `nn.BCELoss`, but flattens input and target." return BaseLoss(nn.BCELoss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs) tst = BCELossFlat() output = torch.sigmoid(torch.randn(32, 5, 10)) target = torch.randint(0,2,(32, 5, 10)) _ = tst(output, target) test_fail(lambda x: nn.BCELoss()(output,target)) # export @log_args(to_return=True) @use_kwargs_dict(reduction='mean') def MSELossFlat(*args, axis=-1, floatify=True, **kwargs): "Same as `nn.MSELoss`, but flattens input and target." return BaseLoss(nn.MSELoss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs) tst = MSELossFlat() output = torch.sigmoid(torch.randn(32, 5, 10)) target = torch.randint(0,2,(32, 5, 10)) _ = tst(output, target) test_fail(lambda x: nn.MSELoss()(output,target)) #hide #cuda #Test losses work in half precision output = torch.sigmoid(torch.randn(32, 5, 10)).half().cuda() target = torch.randint(0,2,(32, 5, 10)).half().cuda() for tst in [BCELossFlat(), MSELossFlat()]: _ = tst(output, target) # export @log_args(to_return=True) @use_kwargs_dict(reduction='mean') def L1LossFlat(*args, axis=-1, floatify=True, **kwargs): "Same as `nn.L1Loss`, but flattens input and target." 
return BaseLoss(nn.L1Loss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs) #export @log_args class LabelSmoothingCrossEntropy(Module): y_int = True def __init__(self, eps:float=0.1, reduction='mean'): self.eps,self.reduction = eps,reduction def forward(self, output, target): c = output.size()[-1] log_preds = F.log_softmax(output, dim=-1) if self.reduction=='sum': loss = -log_preds.sum() else: loss = -log_preds.sum(dim=-1) #We divide by that size at the return line so sum and not mean if self.reduction=='mean': loss = loss.mean() return loss*self.eps/c + (1-self.eps) * F.nll_loss(log_preds, target.long(), reduction=self.reduction) def activation(self, out): return F.softmax(out, dim=-1) def decodes(self, out): return out.argmax(dim=-1) ``` On top of the formula we define: - a `reduction` attribute, that will be used when we call `Learner.get_preds` - an `activation` function that represents the activation fused in the loss (since we use cross entropy behind the scenes). It will be applied to the output of the model when calling `Learner.get_preds` or `Learner.predict` - a <code>decodes</code> function that converts the output of the model to a format similar to the target (here indices). This is used in `Learner.predict` and `Learner.show_results` to decode the predictions ``` #export @log_args @delegates() class LabelSmoothingCrossEntropyFlat(BaseLoss): "Same as `LabelSmoothingCrossEntropy`, but flattens input and target." y_int = True @use_kwargs_dict(keep=True, eps=0.1, reduction='mean') def __init__(self, *args, axis=-1, **kwargs): super().__init__(LabelSmoothingCrossEntropy, *args, axis=axis, **kwargs) def activation(self, out): return F.softmax(out, dim=-1) def decodes(self, out): return out.argmax(dim=-1) ``` ## Export - ``` #hide from nbdev.export import * notebook2script() ```
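For completeness, `LabelSmoothingCrossEntropyFlat` can be exercised in the same style as the flattened-loss tests above. This is a small sketch in the notebook's own testing idiom rather than part of the exported library code.

```python
# Sketch mirroring the CrossEntropyLossFlat tests above for the label-smoothing variant.
tst = LabelSmoothingCrossEntropyFlat()
output = torch.randn(32, 5, 10)
target = torch.randint(0, 10, (32, 5))
# flattens input and target like the other *Flat losses, so these shapes work directly
_ = tst(output, target)
# associated activation is softmax, and decodes is an argmax over the class axis
test_eq(tst.activation(output), F.softmax(output, dim=-1))
test_eq(tst.decodes(output), output.argmax(dim=-1))
```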
<img src="images/qiskit-heading.gif" alt="Note: In order for images to show up in this jupyter notebook you need to select File => Trusted Notebook" width="500 px" align="left"> # Qiskit Tutorials *** Welcome Qiskitters. The easiest way to get started is to use [the Binder image](https://mybinder.org/v2/gh/qiskit/qiskit-tutorials/master?filepath=index.ipynb), which lets you use the notebooks via the web. This means that you don't need to download or install anything, but is also means that you should not insert any private information into the notebooks (such as your API key). We recommend that after you are done using mybinder that you regenerate your token. The tutorials can be downloaded by clicking [here](https://github.com/Qiskit/qiskit-tutorials/archive/master.zip) and to set them up follow the installation instructions [here](https://github.com/Qiskit/qiskit-tutorial/blob/master/INSTALL.md). *** ## Contents We have organized the tutorials into two sections: ### 1. Qiskit These tutorials aim to explain how to use Qiskit. We assume you have installed Qiskit if not please look at [qiskit.org](http://www.qiskit.org) or the install [documentation](https://github.com/qiskit/qiskit-tutorial/blob/master/INSTALL.md). We've collected a core reference set of notebooks in this section outlining the features of Qiskit. We will be keeping them up to date with the latest Qiskit version, currently 0.7. The focus of this section will be how to use Qiskit and not so much on teaching you about quantum computing. For those interested in learning about quantum computing we recommend the awesome notebooks in the community section. Qiskit is made up of four elements: Terra, Aer, Ignis, and Aqua with each element having its own goal and together they make the full Qiskit framework. #### 1.1 Getting started with Qiskit A central goal of Qiskit is to build a software stack that makes it easy for anyone to use quantum computers. To get developers and researchers going we have a set of tutorials on the basics. * [Getting started with Qiskit](qiskit/basics/getting_started_with_qiskit.ipynb) - how to use Qiskit * [The IBM Q provider](qiskit/basics/the_ibmq_provider.ipynb) - working with the IBM Q devices * [Plotting data in Qiskit](qiskit/basics/plotting_data_in_qiskit.ipynb) - illustrates the different ways of plotting data in Qiskit #### 1.2 Qiskit Terra Terra, the ‘earth’ element, is the foundation on which the rest of the software lies. Terra provides a bedrock for composing quantum programs at the level of circuits and pulses, to optimize them for the constraints of a particular device, and to manage the execution of batches of experiments on remote-access devices. Terra defines the interfaces for a desirable end-user experience, as well as the efficient handling of layers of optimization, pulse scheduling and backend communication. 
* [Quantum circuits](qiskit/terra/quantum_circuits.ipynb) - gives a summary of the `QuantumCircuit` object * [Visualizing a quantum circuit](qiskit/terra/visualizing_a_quantum_circuit.ipynb) - details on drawing your quantum circuits * [Summary of quantum operations](qiskit/terra/summary_of_quantum_operations.ipynb) - list of quantum operations (gates, reset, measurements) in Qiskit Terra * [Monitoring jobs and backends](qiskit/terra/backend_monitoring_tools.ipynb) - tools for monitoring jobs and backends * [Parallel tools](qiskit/terra/terra_parallel_tools.ipynb) - executing tasks in parallel using `parallel_map` and tracking progress * [Creating a new provider](qiskit/terra/creating_a_provider.ipynb) - a guide to integration of a new provider with Qiskit structures and interfaces #### 1.3 Qiskit Interacitve Plotting and Jupyter Tools To improve the Qiskit user experience we have made many of the visualizations interactive and developed some very cool new job monitoring tools in Jupyter. * [Jupyter tools for Monitoring jobs and backends](qiskit/jupyter/jupyter_backend_tools.ipynb) - Jupyter tools for monitoring jobs and backends #### 1.4 Qiskit Aer Aer, the ‘air’ element, permeates all Qiskit elements. To really speed up development of quantum computers we need better simulators with the ability to model realistic noise processes that occur during computation on actual devices. Aer provides a high-performance simulator framework for studying quantum computing algorithms and applications in the noisy intermediate scale quantum regime. * [Aer provider](qiskit/aer/aer_provider.ipynb) - gives a summary of the Qiskit Aer provider containing the Qasm, statevector, and unitary simulator * [Device noise simulation](qiskit/aer/device_noise_simulation.ipynb) - shows how to use the Qiskit Aer noise module to automatically generate a basic noise model for simulating hardware backends #### 1.5 Qiskit Ignis Ignis, the ‘fire’ element, is dedicated to fighting noise and errors and to forging a new path. This includes better characterization of errors, improving gates, and computing in the presence of noise. Ignis is meant for those who want to design quantum error correction codes, or who wish to study ways to characterize errors through methods such as tomography, or even to find a better way for using gates by exploring dynamical decoupling and optimal control. While we have already released parts of this element as part of libraries in Terra, an official stand-alone release will come soon. For now we have some tutorials for you to explore. * [Relaxation and decoherence](qiskit/ignis/relaxation_and_decoherence.ipynb) - how to measure coherence times on the real quantum hardware * [Quantum state tomography](qiskit/ignis/state_tomography.ipynb) - how to identify a quantum state using state tomography, in which the state is prepared repeatedly and measured in different bases * [Quantum process tomography](qiskit/ignis/process_tomography.ipynb) - using quantum process tomography to reconstruct the behavior of a quantum process and measure its fidelity, i.e., how closely it matches the ideal version #### 1.6 Qiskit Aqua Aqua, the ‘water’ element, is the element of life. To make quantum computing live up to its expectations, we need to find real-world applications. Aqua is where algorithms for NISQ computers are built. These algorithms can be used to build applications for quantum computing. 
Aqua is accessible to domain experts in chemistry, optimization, AI or finance, who want to explore the benefits of using quantum computers as accelerators for specific computational tasks, without needing to worry about how to translate the problem into the language of quantum machines. * [Chemistry](qiskit/aqua/chemistry/index.ipynb) - using variational quantum eigensolver to experiment with molecular ground-state energy on a quantum computer * [Optimization](qiskit/aqua/optimization/index.ipynb) - using variational quantum eigensolver to experiment with optimization problems (maxcut and traveling salesman problem) on a quantum computer * [Artificial Intelligence](qiskit/aqua/artificial_intelligence/index.ipynb) - using quantum-enhanced support vector machine to experiment with classification problems on a quantum computer * [Finance](qiskit/aqua/finance/index.ipynb) - using variational quantum eigensolver to optimize portfolio on a quantum computer ### 2. Community Notebooks Teaching quantum and qiskit has so many different paths of learning. We love our community and we love the contributions so keep them coming. Because Qiskit is changing so much we can't keep this updated (we will try our best) but there are some great notebooks in here. #### 2.1 [Hello, Quantum World with Qiskit](community/hello_world/) Learn from the community how to write your first quantum program. #### 2.2 [Quantum Games with Qiskit](community/games/) Learn quantum computing by having fun. How is there a better way! #### 2.3 [Quantum Information Science with Qiskit Terra](community/terra/index.ipynb) Learn about and how to program quantum circuits using Qiskit Terra. #### 2.4 [Textbook Quantum Algorithms with Qiskit Terra](community/algorithms/index.ipynb) Learn about textbook quantum algorithms, like Deutsch-Jozsa, Grover, and Shor using Qiskit Terra. #### 2.5 [Developing Quantum Applications with Qiskit Aqua](community/aqua/index.ipynb) Learn how to develop and the fundamentals of quantum applications using Qiskit Aqua #### 2.6 Awards Learn from the great contributions to the [IBM Q Awards](https://qe-awards.mybluemix.net/) * [Teach Me Qiskit 2018](community/awards/teach_me_qiskit_2018/index.ipynb) * [Teach Me Quantum 2018](community/awards/teach_me_quantum_2018/index.ipynb) ``` from IPython.display import display, Markdown with open('index.md', 'r') as readme: content = readme.read(); display(Markdown(content)) ``` *** ## License This project is licensed under the Apache License 2.0 - see the [LICENSE](https://github.com/Qiskit/qiskit-tutorials/blob/master/LICENSE) file for details.
``` # HIDDEN from datascience import * %matplotlib inline import matplotlib.pyplot as plots plots.style.use('fivethirtyeight') import math import numpy as np from scipy import stats import ipywidgets as widgets import nbinteract as nbi ``` ### The Central Limit Theorem ### Very few of the data histograms that we have seen in this course have been bell shaped. When we have come across a bell shaped distribution, it has almost invariably been an empirical histogram of a statistic based on a random sample. **The Central Limit Theorem says that the probability distribution of the sum or average of a large random sample drawn with replacement will be roughly normal, *regardless of the distribution of the population from which the sample is drawn*.** As we noted when we were studying Chebychev's bounds, results that can be applied to random samples *regardless of the distribution of the population* are very powerful, because in data science we rarely know the distribution of the population. The Central Limit Theorem makes it possible to make inferences with very little knowledge about the population, provided we have a large random sample. That is why it is central to the field of statistical inference. ### Proportion of Purple Flowers ### Recall Mendel's probability model for the colors of the flowers of a species of pea plant. The model says that the flower colors of the plants are like draws made at random with replacement from {Purple, Purple, Purple, White}. In a large sample of plants, about what proportion will have purple flowers? We would expect the answer to be about 0.75, the proportion purple in the model. And, because proportions are means, the Central Limit Theorem says that the distribution of the sample proportion of purple plants is roughly normal. We can confirm this by simulation. Let's simulate the proportion of purple-flowered plants in a sample of 200 plants. ``` colors = make_array('Purple', 'Purple', 'Purple', 'White') model = Table().with_column('Color', colors) model props = make_array() num_plants = 200 repetitions = 1000 for i in np.arange(repetitions): sample = model.sample(num_plants) new_prop = np.count_nonzero(sample.column('Color') == 'Purple')/num_plants props = np.append(props, new_prop) props[:5] opts = { 'title': 'Distribution of sample proportions', 'xlabel': 'Sample Proportion', 'ylabel': 'Percent per unit', 'xlim': (0.64, 0.84), 'ylim': (0, 25), 'bins': 20, } nbi.hist(props, options=opts) ``` There's that normal curve again, as predicted by the Central Limit Theorem, centered at around 0.75 just as you would expect. How would this distribution change if we increased the sample size? We can copy our sampling code into a function and then use interaction to see how the distribution changes as the sample size increases. We will keep the number of `repetitions` the same as before so that the two columns have the same length. ``` def empirical_props(num_plants): props = make_array() for i in np.arange(repetitions): sample = model.sample(num_plants) new_prop = np.count_nonzero(sample.column('Color') == 'Purple')/num_plants props = np.append(props, new_prop) return props nbi.hist(empirical_props, options=opts, num_plants=widgets.ToggleButtons(options=[100, 200, 400, 800])) ``` All of the above distributions are approximately normal but become more narrow as the sample size increases. For example, the proportions based on a sample size of 800 are more tightly clustered around 0.75 than those from a sample size of 200. 
Increasing the sample size has decreased the variability in the sample proportion.
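To quantify that narrowing, the quick numpy-only sketch below (not part of the original text) compares the empirical spread of the sample proportion for two sample sizes against the standard √(p(1−p)/n) rule of thumb with p = 0.75 under Mendel's model; it reuses `colors` and `repetitions` from above.

```python
# Sketch (assumption, numpy only): empirical SD of the sample proportion versus
# sqrt(p*(1-p)/n) for p = 0.75, at two different sample sizes.
p = 0.75
for num_plants in (200, 800):
    props = np.array([
        np.count_nonzero(np.random.choice(colors, num_plants) == 'Purple') / num_plants
        for _ in np.arange(repetitions)
    ])
    print(num_plants, round(props.std(), 4), round(np.sqrt(p * (1 - p) / num_plants), 4))
```

Quadrupling the sample size roughly halves the spread, which matches the visual narrowing of the histograms above.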
# Plotting aggregate variables Pyam offers many great visualisation and analysis tools. In this notebook we highlight the `aggregate` and `stack_plot` methods of an `IamDataFrame`. ``` import numpy as np import pandas as pd import pyam %matplotlib inline import matplotlib.pyplot as plt ``` Here we provide some sample data for this tutorial. This data is for a single model-scenario-region combination but provides multiple subsectors of CO$_2$ emissions. The emissions in the subsectors are both positive and negative and so provide a good test of the flexibility of our aggregation and plotting routines. ``` df = pyam.IamDataFrame(pd.DataFrame([ ['IMG', 'a_scen', 'World', 'Emissions|CO2|Energy|Oil', 'Mt CO2/yr', 2, 3.2, 2.0, 1.8], ['IMG', 'a_scen', 'World', 'Emissions|CO2|Energy|Gas', 'Mt CO2/yr', 1.3, 1.6, 1.0, 0.7], ['IMG', 'a_scen', 'World', 'Emissions|CO2|Energy|BECCS', 'Mt CO2/yr', 0.0, 0.4, -0.4, 0.3], ['IMG', 'a_scen', 'World', 'Emissions|CO2|Cars', 'Mt CO2/yr', 1.6, 3.8, 3.0, 2.5], ['IMG', 'a_scen', 'World', 'Emissions|CO2|Tar', 'Mt CO2/yr', 0.3, 0.35, 0.35, 0.33], ['IMG', 'a_scen', 'World', 'Emissions|CO2|Agg', 'Mt CO2/yr', 0.5, -0.1, -0.5, -0.7], ['IMG', 'a_scen', 'World', 'Emissions|CO2|LUC', 'Mt CO2/yr', -0.3, -0.6, -1.2, -1.0] ], columns=['model', 'scenario', 'region', 'variable', 'unit', 2005, 2010, 2015, 2020], )) df.head() ``` Pyam's `stack_plot` method plots the stacks in the clearest way possible, even when some emissions are negative. The optional `total` keyword arguments also allows the user to include a total line on their plot. ``` df.stack_plot(); df.stack_plot(total=True); ``` The appearance of the stackplot can be simply controlled via ``kwargs``. The appearance of the total line is controlled by passing a dictionary to the `total_kwargs` keyword argument. ``` df.stack_plot(alpha=0.5, total={"color": "grey", "ls": "--", "lw": 2.0}); ``` If the user wishes, they can firstly filter their data before plotting. ``` df.filter(variable="Emissions|CO2|Energy*").stack_plot(total=True); ``` Using `aggregate`, it is possible to create arbitrary sums of sub-sectors before plotting. ``` pdf = df.copy() afoluluc_vars = ["Emissions|CO2|LUC", "Emissions|CO2|Agg"] fossil_vars = list(set(pdf.variables()) - set(afoluluc_vars)) pdf.aggregate( "Emissions|CO2|AFOLULUC", components=afoluluc_vars, append=True ) pdf.aggregate( "Emissions|CO2|Fossil", components=fossil_vars, append=True ) pdf.filter(variable=[ "Emissions|CO2|AFOLULUC", "Emissions|CO2|Fossil" ]).stack_plot(total=True); ```
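The same pattern works for any other grouping. As a small sketch using only the methods already shown above and the subsector names from the sample data, the three energy subsectors can be rolled up into a single `Emissions|CO2|Energy` variable before plotting:

```python
# Sketch: aggregate the energy subsectors into one variable, then stack-plot
# the aggregate alongside the remaining sectors.
pdf2 = df.copy()
energy_vars = [
    "Emissions|CO2|Energy|Oil",
    "Emissions|CO2|Energy|Gas",
    "Emissions|CO2|Energy|BECCS",
]
pdf2.aggregate("Emissions|CO2|Energy", components=energy_vars, append=True)
pdf2.filter(variable=[
    "Emissions|CO2|Energy", "Emissions|CO2|Cars", "Emissions|CO2|Tar",
    "Emissions|CO2|Agg", "Emissions|CO2|LUC",
]).stack_plot(total=True);
```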
Author: Saeed Amen (@thalesians) - Managing Director & Co-founder of [the Thalesians](http://www.thalesians.com) ## Introduction With the UK general election in early May 2015, we thought it would be a fun exercise to demonstrate how you can investigate market price action over historial elections. We shall be using Python, together with Plotly for plotting. Plotly is a free web-based platform for making graphs. You can keep graphs private, make them public, and run Plotly on your [Plotly Enterprise on your own servers](https://plot.ly/product/enterprise/). You can find more details [here](https://plot.ly/python/getting-started/). ## Getting market data with Bloomberg To get market data, we shall be using Bloomberg. As a starting point, we have used bbg_py from [Brian Smith's TIA project](https://github.com/bpsmith/tia/tree/master/tia/bbg), which allows you to access Bloomberg via COM (older method), modifying it to make it compatible for Python 3.4. Whilst, we shall note use it to access historical daily data, there are functions which enable us to download intraday data. This method is only compatible with 32 bit versions of Python and assumes you are running the code on a Bloomberg terminal (it won't work without a valid Bloomberg licence). In my opinion a better way to access Bloomberg via Python, is via the official Bloomberg open source Python Open Source Graphing Library, however, at time of writing the official version is not yet compatible with Python 3.4. Fil Mackay has created a Python 3.4 compatible version of this [here](https://github.com/filmackay/blpapi-py), which I have used successfully. Whilst it takes slightly more time to configure (and compile using Windows SDK 7.1), it has the benefit of being compatible with 64 bit Python, which I have found invaluable in my analysis (have a read of [this](http://ta.speot.is/2012/04/09/visual-studio-2010-sp1-windows-sdk-7-1-install-order/) in case of failed installations of Windows SDK 7.1). Quandl can be used as an alternative data source, if you don't have access to a Bloomberg terminal, which I have also included in the code. ## Breaking down the steps in Python Our project will consist of several parts: - bbg_com - low level interaction with BBG COM object (adapted for Python 3.4) (which we are simply calling) - datadownloader - wrapper for BBG COM, Quandl and CSV access to data - eventplot - reusuable functions for interacting with Plotly and creating event studies - ukelection - kicks off the whole script process ### Downloading the market data As with any sort of financial market analysis, the first step is obtaining market data. We create the DataDownloader class, which acts a wrapper for Bloomberg, Quandl and CSV market data. We write a single function "download_time_series" for this. We could of course extend this for other data sources such as Yahoo Finance. Our output will be Pandas based dataframes. We want to make this code generic, so the tickers are not hard coded. 
``` # for time series manipulation import pandas class DataDownloader: def download_time_series(self, vendor_ticker, pretty_ticker, start_date, source, csv_file = None): if source == 'Quandl': import Quandl # Quandl requires API key for large number of daily downloads # https://www.quandl.com/help/api spot = Quandl.get(vendor_ticker) # Bank of England's database on Quandl spot = pandas.DataFrame(data=spot['Value'], index=spot.index) spot.columns = [pretty_ticker] elif source == 'Bloomberg': from bbg_com import HistoricalDataRequest req = HistoricalDataRequest([vendor_ticker], ['PX_LAST'], start = start_date) req.execute() spot = req.response_as_single() spot.columns = [pretty_ticker] elif source == 'CSV': dateparse = lambda x: pandas.datetime.strptime(x, '%Y-%m-%d') # in case you want to use a source other than Bloomberg/Quandl spot = pandas.read_csv(csv_file, index_col=0, parse_dates=0, date_parser=dateparse) return spot ``` ### Generic functions for event study and Plotly plotting We now focus our efforts on the EventPlot class. Here we shall do our basic analysis. We shall aslo create functions for creating plotly traces and layouts that we shall reuse a number of times. The analysis we shall conduct is fairly simple. Given a time series of spot, and a number of dates, we shall create an event study around these times for that asset. We also include the "Mean" move over all the various dates. ``` # for dates import datetime # time series manipulation import pandas # for plotting data import plotly from plotly.graph_objs import * class EventPlot: def event_study(self, spot, dates, pre, post, mean_label = 'Mean'): # event_study - calculates the asset price moves over windows around event days # # spot = price of asset to study # dates = event days to anchor our event study # pre = days before the event day to start our study # post = days after the event day to start our study # data_frame = pandas.DataFrame() # for each date grab spot data the days before and after for i in range(0, len(dates)): mid_index = spot.index.searchsorted(dates[i]) start_index = mid_index + pre finish_index = mid_index + post + 1 x = (spot.ix[start_index:finish_index])[spot.columns.values[0]] data_frame[dates[i]] = x.values data_frame.index = range(pre, post + 1) data_frame = data_frame / data_frame.shift(1) - 1 # returns # add the mean on to the end data_frame[mean_label] = data_frame.mean(axis=1) data_frame = 100.0 * (1.0 + data_frame).cumprod() # index data_frame.ix[pre,:] = 100 return data_frame ``` We write a function to convert dates represented in a string format to Python format. ``` def parse_dates(self, str_dates): # parse_dates - parses string dates into Python format # # str_dates = dates to be parsed in the format of day/month/year # dates = [] for d in str_dates: dates.append(datetime.datetime.strptime(d, '%d/%m/%Y')) return dates EventPlot.parse_dates = parse_dates ``` Our next focus is on the Plotly functions which create a layout. This enables us to specify axes labels, the width and height of the final plot and so on. We could of course add further properties into it. 
``` def create_layout(self, title, xaxis, yaxis, width = -1, height = -1): # create_layout - populates a layout object # title = title of the plot # xaxis = xaxis label # yaxis = yaxis label # width (optional) = width of plot # height (optional) = height of plot # layout = Layout( title = title, xaxis = plotly.graph_objs.XAxis( title = xaxis, showgrid = False ), yaxis = plotly.graph_objs.YAxis( title= yaxis, showline = False ) ) if width > 0 and height > 0: layout['width'] = width layout['height'] = height return layout EventPlot.create_layout = create_layout ``` Earlier, in the DataDownloader class, our output was Pandas based dataframes. Our convert_df_plotly function will convert these each series from Pandas dataframe into plotly traces. Along the way, we shall add various properties such as markers with varying levels of opacity, graduated coloring of lines (which uses colorlover) and so on. ``` def convert_df_plotly(self, dataframe, axis_no = 1, color_def = ['default'], special_line = 'Mean', showlegend = True, addmarker = False, gradcolor = None): # convert_df_plotly - converts a Pandas data frame to Plotly format for line plots # dataframe = data frame due to be converted # axis_no = axis for plot to be drawn (default = 1) # special_line = make lines named this extra thick # color_def = color scheme to be used (default = ['default']), colour will alternate in the list # showlegend = True or False to show legend of this line on plot # addmarker = True or False to add markers # gradcolor = Create a graduated color scheme for the lines # # Also see http://nbviewer.ipython.org/gist/nipunreddevil/7734529 for converting dataframe to traces # Also see http://moderndata.plot.ly/color-scales-in-ipython-notebook/ x = dataframe.index.values traces = [] # will be used for market opacity for the markers increments = 0.95 / float(len(dataframe.columns)) if gradcolor is not None: try: import colorlover as cl color_def = cl.scales[str(len(dataframe.columns))]['seq'][gradcolor] except: print('Check colorlover installation...') i = 0 for key in dataframe: scatter = plotly.graph_objs.Scatter( x = x, y = dataframe[key].values, name = key, xaxis = 'x' + str(axis_no), yaxis = 'y' + str(axis_no), showlegend = showlegend) # only apply color/marker properties if not "default" if color_def[i % len(color_def)] != "default": if special_line in str(key): # special case for lines labelled "mean" # make line thicker scatter['mode'] = 'lines' scatter['line'] = plotly.graph_objs.Line( color = color_def[i % len(color_def)], width = 2 ) else: line_width = 1 # set properties for the markers which change opacity # for markers make lines thinner if addmarker: opacity = 0.05 + (increments * i) scatter['mode'] = 'markers+lines' scatter['marker'] = plotly.graph_objs.Marker( color=color_def[i % len(color_def)], # marker color opacity = opacity, size = 5) line_width = 0.2 else: scatter['mode'] = 'lines' scatter['line'] = plotly.graph_objs.Line( color = color_def[i % len(color_def)], width = line_width) i = i + 1 traces.append(scatter) return traces EventPlot.convert_df_plotly = convert_df_plotly ``` ### UK election analysis We've now created several generic functions for downloading data, doing an event study and also for helping us out with plotting via Plotly. We now start work on the ukelection.py script, for pulling it all together. As a very first step we need to provide credentials for Plotly (you can get your own Plotly key and username [here](https://plot.ly/python/getting-started/)). 
``` # for time series/maths import pandas # for plotting data import plotly import plotly.plotly as py from plotly.graph_objs import * def ukelection(): # Learn about API authentication here: https://plot.ly/python/getting-started # Find your api_key here: https://plot.ly/settings/api plotly_username = "thalesians" plotly_api_key = "XXXXXXXXX" plotly.tools.set_credentials_file(username=plotly_username, api_key=plotly_api_key) ``` Let's download our market data that we need (GBP/USD spot data) using the DataDownloader class. As a default, I've opted to use Bloomberg data. You can try other currency pairs or markets (for example FTSE), to compare results for the event study. Note that obviously each data vendor will have a different ticker in their system for what could well be the same asset. With FX, care must be taken to know which close the vendor is snapping. As a default we have opted for BGN, which for GBP/USD is the NY close value. ``` ticker = 'GBPUSD' # will use in plot titles later (and for creating Plotly URL) ##### download market GBP/USD data from Quandl, Bloomberg or CSV file source = "Bloomberg" # source = "Quandl" # source = "CSV" csv_file = None event_plot = EventPlot() data_downloader = DataDownloader() start_date = event_plot.parse_dates(['01/01/1975']) if source == 'Quandl': vendor_ticker = "BOE/XUDLUSS" elif source == 'Bloomberg': vendor_ticker = 'GBPUSD BGN Curncy' elif source == 'CSV': vendor_ticker = 'GBPUSD' csv_file = 'D:/GBPUSD.csv' spot = data_downloader.download_time_series(vendor_ticker, ticker, start_date[0], source, csv_file = csv_file) ``` The most important part of the study is getting the historical UK election dates! We can obtain these from Wikipedia. We then convert into Python format. We need to make sure we filter the UK election dates, for where we have spot data available. ``` labour_wins = ['28/02/1974', '10/10/1974', '01/05/1997', '07/06/2001', '05/05/2005'] conservative_wins = ['03/05/1979', '09/06/1983', '11/06/1987', '09/04/1992', '06/05/2010'] # convert to more easily readable format labour_wins_d = event_plot.parse_dates(labour_wins) conservative_wins_d = event_plot.parse_dates(conservative_wins) # only takes those elections where we have data labour_wins_d = [d for d in labour_wins_d if d > spot.index[0].to_pydatetime()] conservative_wins_d = [d for d in conservative_wins_d if d > spot.index[0].to_pydatetime()] spot.index.name = 'Date' ``` We then call our event study function in EventPlot on our spot data, which compromises of the 20 days before up till the 20 days after the UK general election. We shall plot these lines later. ``` # number of days before and after for our event study pre = -20 post = 20 # calculate spot path during Labour wins labour_wins_spot = event_plot.event_study(spot, labour_wins_d, pre, post, mean_label = 'Labour Mean') # calculate spot path during Conservative wins conservative_wins_spot = event_plot.event_study(spot, conservative_wins_d, pre, post, mean_label = 'Conservative Mean') ``` Define our xaxis and yaxis labels, as well as our source, which we shall later include in the title. ``` ##### Create separate plots of price action during Labour and Conservative wins xaxis = 'Days' yaxis = 'Index' source_label = "Source: @thalesians/BBG/Wikipedia" ``` We're finally ready for our first plot! We shall plot GBP/USD moves over Labour election wins, using the default palette and then we shall embed it into the sheet, using the URL given to us from the Plotly website. 
``` ###### Plot market reaction during Labour UK election wins ###### Using default color scheme title = ticker + ' during UK gen elect - Lab wins' + '<BR>' + source_label fig = Figure(data=event_plot.convert_df_plotly(labour_wins_spot), layout=event_plot.create_layout(title, xaxis, yaxis) ) py.iplot(fig, filename='labour-wins-' + ticker) ``` The "iplot" function will send it to Plotly's server (provided we have all the dependencies installed). Alternatively, we could embed the HTML as an image, which we have taken from the Plotly website. Note this approach will yield a static image which is fetched from Plotly's servers. It also possible to write the image to disk. Later we shall show the embed function. <div> <a href="https://plot.ly/~thalesians/244/" target="_blank" title="GBPUSD during UK gen elect - Lab wins&lt;br&gt;Source: @thalesians/BBG/Wikipedia" style="display: block; text-align: center;"><img src="https://plot.ly/~thalesians/244.png" alt="GBPUSD during UK gen elect - Lab wins&lt;br&gt;Source: @thalesians/BBG/Wikipedia" style="max-width: 100%;" onerror="this.onerror=null;this.src='https://plot.ly/404.png';" /></a> <script data-plotly="thalesians:244" src="https://plot.ly/embed.js" async></script> </div> We next plot GBP/USD over Conservative wins. In this instance, however, we have a graduated 'Blues' color scheme, given obviously that blue is the color of the Conserative party in the UK! ``` ###### Plot market reaction during Conservative UK election wins ###### Using varying shades of blue for each line (helped by colorlover library) title = ticker + ' during UK gen elect - Con wins ' + '<BR>' + source_label # also apply graduated color scheme of blues (from light to dark) # see http://moderndata.plot.ly/color-scales-in-ipython-notebook/ for details on colorlover package # which allows you to set scales fig = Figure(data=event_plot.convert_df_plotly(conservative_wins_spot, gradcolor='Blues', addmarker=False), layout=event_plot.create_layout(title, xaxis, yaxis), ) plot_url = py.iplot(fig, filename='conservative-wins-' + ticker) ``` Embed the chart into the document using "embed". This essentially embeds the Javascript code, necessary to make it interactive. ``` import plotly.tools as tls tls.embed("https://plot.ly/~thalesians/245") ``` Our final plot, will consist of three subplots, Labour wins, Conservative wins, and average moves for both. We also add a grid and a grey background for each plot. ``` ##### Plot market reaction during Conservative UK election wins ##### create a plot consisting of 3 subplots (from left to right) ##### 1. Labour wins, 2. Conservative wins, 3. 
Conservative/Labour mean move # create a dataframe which grabs the mean from the respective Lab & Con election wins mean_wins_spot = pandas.DataFrame() mean_wins_spot['Labour Mean'] = labour_wins_spot['Labour Mean'] mean_wins_spot['Conservative Mean'] = conservative_wins_spot['Conservative Mean'] fig = plotly.tools.make_subplots(rows=1, cols=3) # apply different color scheme (red = Lab, blue = Con) # also add markets, which will have varying levels of opacity fig['data'] += Data( event_plot.convert_df_plotly(conservative_wins_spot, axis_no=1, color_def=['blue'], addmarker=True) + event_plot.convert_df_plotly(labour_wins_spot, axis_no=2, color_def=['red'], addmarker=True) + event_plot.convert_df_plotly(mean_wins_spot, axis_no=3, color_def=['red', 'blue'], addmarker=True, showlegend = False) ) fig['layout'].update(title=ticker + ' during UK gen elects by winning party ' + '<BR>' + source_label) # use the scheme from https://plot.ly/python/bubble-charts-tutorial/ # can use dict approach, rather than specifying each separately axis_style = dict( gridcolor='#FFFFFF', # white grid lines ticks='outside', # draw ticks outside axes ticklen=8, # tick length tickwidth=1.5 # and width ) # create the various axes for the three separate charts fig['layout'].update(xaxis1=plotly.graph_objs.XAxis(axis_style, title=xaxis)) fig['layout'].update(yaxis1=plotly.graph_objs.YAxis(axis_style, title=yaxis)) fig['layout'].update(xaxis2=plotly.graph_objs.XAxis(axis_style, title=xaxis)) fig['layout'].update(yaxis2=plotly.graph_objs.YAxis(axis_style)) fig['layout'].update(xaxis3=plotly.graph_objs.XAxis(axis_style, title=xaxis)) fig['layout'].update(yaxis3=plotly.graph_objs.YAxis(axis_style)) fig['layout'].update(plot_bgcolor='#EFECEA') # set plot background to grey plot_url = py.iplot(fig, filename='labour-conservative-wins-'+ ticker + '-subplot') ``` This time we use "embed", which grab the plot from Plotly's server, we did earlier (given we have already uploaded it). ``` import plotly.tools as tls tls.embed("https://plot.ly/~thalesians/246") ``` <B>That's about it!</B> I hope the code I've written proves fruitful for creating some very cool Plotly plots and also for doing some very timely analysis ahead of the UK general election! Hoping this will be first of many blogs on using Plotly data. The analysis in this blog is based on a report I wrote for Thalesians, a quant finance thinktank. If you are interested in getting access to the full copy of the report (Thalesians: My kingdom for a vote - The definitive quant guide to UK general elections), feel free to e-mail me at <b>[email protected]</b> or tweet me <b>@thalesians</b> ## Want to hear more about global macro and UK election developments? If you're interested in FX and the UK general election, come to our Thalesians panel in London on April 29th 2015 at 7.30pm in Canary Wharf, which will feature, Eric Burroughs (Reuters - FX Buzz Editor), Mark Cudmore (Bloomberg - First Word EM Strategist), Jordan Rochester (Nomura - FX strategist), Jeremy Wilkinson-Smith (Independent FX trader) and myself as the moderator. Tickets are available [here](http://www.meetup.com/thalesians/events/221147156/) ## Biography <b>Saeed Amen</b> is the managing director and co-founder of the Thalesians. He has a decade of experience creating and successfully running systematic trading models at Lehman Brothers, Nomura and now at the Thalesians. Independently, he runs a systematic trading model with proprietary capital. 
He is the author of Trading Thalesians – What the ancient world can teach us about trading today (Palgrave Macmillan). He graduated with a first class honours master’s degree from Imperial College in Mathematics & Computer Science. He is also a fan of Python and has written an extensive library for financial market backtesting called PyThalesians. <BR> Follow the Thalesians on Twitter @thalesians and get my book on Amazon [here](http://www.amazon.co.uk/Trading-Thalesians-Saeed-Amen/dp/113739952X) All the code here is available to download from the [Thalesians GitHub page](https://github.com/thalesians/pythalesians) ``` from IPython.display import display, HTML display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />')) display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">')) ! pip install publisher --upgrade import publisher publisher.publish( 'ukelectionbbg.ipynb', 'ipython-notebooks/ukelectionbbg/', 'Plotting GBP/USD price action around UK general elections', 'Create interactive graphs with market data, IPython Notebook and Plotly', name='Plot MP Action in GBP/USD around UK General Elections') ```
# Introduction to Bayesian Optimization with GPyOpt

### Written by Javier Gonzalez, Amazon Research Cambridge

*Last updated Monday, 22 May 2017.*

=====================================================================================================

1. **How to use GPyOpt?**
2. **The Basics of Bayesian Optimization**
    1. Gaussian Processes
    2. Acquisition functions
    3. Applications of Bayesian Optimization
3. **1D optimization example**
4. **2D optimization example**

=====================================================================================================

## 1. How to use GPyOpt?

We start by loading GPyOpt and GPy.

```
%pylab inline
import GPy
import GPyOpt
from numpy.random import seed
import matplotlib
```

GPyOpt is easy to use as a black-box function optimizer. To start you only need:

* Your favorite function $f$ to minimize. We use $f(x)=(2x)^2$ in this toy example, whose global minimum is at $x=0$.

```
def myf(x):
    return (2*x)**2
```

* A set of box constraints, the interval [-1,1] in our case. You can define a list of dictionaries where each element defines the name, type and domain of the variables.

```
bounds = [{'name': 'var_1', 'type': 'continuous', 'domain': (-1,1)}]
```

* A budget, or number of allowed evaluations of $f$.

```
max_iter = 15
```

With these three pieces of information GPyOpt has enough to find the minimum of $f$ in the selected region. GPyOpt solves the problem in two steps. First, you need to create a GPyOpt object that stores the problem ($f$ and the box constraints). You can do it as follows.

```
myProblem = GPyOpt.methods.BayesianOptimization(myf, bounds)
```

Next you need to run the optimization for the given budget of iterations. This step can be a bit slow because many default options are used. In the next notebooks of this manual you can learn how to change other parameters to improve the optimization performance.

```
myProblem.run_optimization(max_iter)
```

Now you can check the best found location $x^*$ by

```
myProblem.x_opt
```

and the predicted value of $f$ at the optimum $x^*$ by

```
myProblem.fx_opt
```

And that's it! Keep reading to learn how GPyOpt uses Bayesian Optimization to solve this and other optimization problems. You will also learn all the features and options that you can use to solve your problems efficiently.

=====================================================================================================

## 2. The Basics of Bayesian Optimization

Bayesian optimization (BO) is a strategy for global optimization of black-box functions [(Snoek et al., 2012)](http://papers.nips.cc/paper/4522-practical-bayesian-optimization-of-machine-learning-algorithms.pdf). Let $f: {\mathcal X} \to R$ be an L-Lipschitz continuous function defined on a compact subset ${\mathcal X} \subseteq R^d$. We are interested in solving the global optimization problem of finding

$$ x_{M} = \arg \min_{x \in {\mathcal X}} f(x). $$

We assume that $f$ is a *black-box* from which only perturbed evaluations of the type $y_i = f(x_i) + \epsilon_i$, with $\epsilon_i \sim\mathcal{N}(0,\psi^2)$, are available. The goal is to make a series of $x_1,\dots,x_N$ evaluations of $f$ such that the *cumulative regret*

$$r_N= Nf(x_{M})- \sum_{n=1}^N f(x_n),$$

is minimized. Essentially, $r_N$ is minimized if we start evaluating $f$ at $x_{M}$ as soon as possible.

There are two crucial ingredients in any Bayesian Optimization (BO) procedure.

1. Define a **prior probability measure** on $f$: this function will capture our prior beliefs about $f$.
The prior will be updated to a 'posterior' using the available data.

2. Define an **acquisition function** $acqu(x)$: this is a criterion used to decide where to sample next in order to gain the maximum information about the location of the global optimum of $f$.

Every time a new data point is collected, the model is re-estimated and the acquisition function is optimized again until convergence. Given a prior over the function $f$ and an acquisition function, a BO procedure will converge to the optimum of $f$ under some conditions [(Bull, 2011)](http://arxiv.org/pdf/1101.3501.pdf).

### 2.1 Prior probability measure on $f$: Gaussian processes

A Gaussian process (GP) is a probability distribution over classes of functions, typically smooth, such that each linear finite-dimensional restriction is multivariate Gaussian [(Rasmussen and Williams, 2006)](http://www.gaussianprocess.org/gpml). GPs are fully parametrized by a mean $\mu(x)$ and a covariance function $k(x,x')$. Without loss of generality $\mu(x)$ is assumed to be zero. The covariance function $k(x,x')$ characterizes the smoothness and other properties of $f$. It is known as the kernel of the process and has to be continuous, symmetric and positive definite. A widely used kernel is the squared exponential, given by

$$ k(x,x') = l \cdot \exp{ \left(-\frac{\|x-x'\|^2}{2\sigma^2}\right)} $$

where $\sigma^2$ and $l$ are positive parameters.

To denote that $f$ is a sample from a GP with mean $\mu$ and covariance $k$ we write

$$f(x) \sim \mathcal{GP}(\mu(x),k(x,x')).$$

For regression tasks, the most important feature of GPs is that process priors are conjugate to the likelihood from finitely many observations $y= (y_1,\dots,y_n)^T$ and $X =\{x_1,...,x_n\}$, $x_i\in \mathcal{X}$ of the form $y_i = f(x_i) + \epsilon_i$ where $\epsilon_i \sim \mathcal{N} (0,\sigma^2)$. We obtain the Gaussian posterior $f(x^*)|X, y, \theta \sim \mathcal{N}(\mu(x^*),\sigma^2(x^*))$, where $\mu(x^*)$ and $\sigma^2(x^*)$ have closed form. See [(Rasmussen and Williams, 2006)](http://www.gaussianprocess.org/gpml) for details.

### 2.2 Acquisition Function

Acquisition functions are designed to represent our beliefs over the optimum of $f(x)$. Denote by $\theta$ the parameters of the GP model and by $\{x_i,y_i\}$ the available sample. Three of the most common acquisition functions, all available in GPyOpt, are:

* **Maximum probability of improvement (MPI)**:

$$acqu_{MPI}(x;\{x_n,y_n\},\theta) = \Phi(\gamma(x)), \mbox{where}\ \gamma(x)=\frac{\mu(x;\{x_n,y_n\},\theta)-f(x_{best})-\psi}{\sigma(x;\{x_n,y_n\},\theta)}.$$

* **Expected improvement (EI)**:

$$acqu_{EI}(x;\{x_n,y_n\},\theta) = \sigma(x;\{x_n,y_n\},\theta) \left(\gamma(x) \Phi(\gamma(x)) + N(\gamma(x);0,1)\right).$$

* **Upper confidence bound (UCB)**:

$$acqu_{UCB}(x;\{x_n,y_n\},\theta) = -\mu(x;\{x_n,y_n\},\theta)+\psi\sigma(x;\{x_n,y_n\},\theta).$$

$\psi$ is a tunable parameter that helps to make the acquisition functions more flexible. In the case of the UCB, this parameter defines the balance between the importance we give to the mean and the variance of the model. This is known as the **exploration/exploitation trade-off**.
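To make these formulas concrete, here is a minimal NumPy/SciPy sketch (not part of GPyOpt) that evaluates the three criteria for a toy posterior mean and standard deviation at a few candidate points; the names `mu`, `sigma` and `f_best` are illustrative, and the EI line uses the closed form given above.

```
import numpy as np
from scipy.stats import norm

def toy_acquisitions(mu, sigma, f_best, psi=0.01):
    # gamma(x) as defined above
    gamma = (mu - f_best - psi) / sigma
    mpi = norm.cdf(gamma)                                      # maximum probability of improvement
    ei  = sigma * (gamma * norm.cdf(gamma) + norm.pdf(gamma))  # expected improvement
    ucb = -mu + psi * sigma                                    # upper confidence bound
    return mpi, ei, ucb

# posterior mean/std of a hypothetical GP at three candidate points
mu    = np.array([0.20, 0.50, 0.10])
sigma = np.array([0.30, 0.10, 0.40])
print(toy_acquisitions(mu, sigma, f_best=0.45))
```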
### 2.3 Applications of Bayesian Optimization

Bayesian Optimization has been applied to solve a wide range of problems. Among many others, some nice applications of Bayesian Optimization include:

* Sensor networks (http://www.robots.ox.ac.uk/~parg/pubs/ipsn673-garnett.pdf),
* Automatic algorithm configuration (http://www.cs.ubc.ca/labs/beta/Projects/SMAC/papers/11-LION5-SMAC.pdf),
* Deep learning (http://www.mlss2014.com/files/defreitas_slides1.pdf),
* Gene design (http://bayesopt.github.io/papers/paper5.pdf),
* and a long list of others!

In this YouTube video you can see Bayesian Optimization working in real time on a robotics example [(Calandra et al., 2014)](http://www.ias.tu-darmstadt.de/uploads/Site/EditPublication/Calandra_LION8.pdf).

```
from IPython.display import YouTubeVideo
YouTubeVideo('ualnbKfkc3Q')
```

## 3. One dimensional example

In this example we show how GPyOpt works in a one-dimensional example that is a bit more difficult than the one we analyzed in Section 1. Let's consider here the Forrester function

$$f(x) =(6x-2)^2 \sin(12x-4)$$

defined on the interval $[0, 1]$. The minimum of this function is located at $x_{min}=0.78$. The Forrester function is part of the benchmark of functions of GPyOpt. To create the true function, the perturbed version and the boundaries of the problem you need to run the following cell.

```
%pylab inline
import GPy
import GPyOpt

# Create the true and perturbed Forrester function and the boundaries of the problem
f_true = GPyOpt.objective_examples.experiments1d.forrester()           # noisy version
bounds = [{'name': 'var_1', 'type': 'continuous', 'domain': (0,1)}]    # problem constraints
```

We plot the true Forrester function.

```
f_true.plot()
```

As we did in Section 1, we need to create the GPyOpt object that will run the optimization. We specify the function, the boundaries and we add the type of acquisition function to use.

```
# Creates the GPyOpt object with the model and the acquisition function
seed(123)
myBopt = GPyOpt.methods.BayesianOptimization(f=f_true.f,              # function to optimize
                                             domain=bounds,           # box-constraints of the problem
                                             acquisition_type='EI',   # selects the Expected Improvement
                                             exact_feval = True)
```

Now we want to run the optimization. Apart from the number of iterations, you can select how you want to optimize the acquisition function. You can run a number of local optimizers (acqu_optimize_restart) at random or on a grid (acqu_optimize_method).

```
# Run the optimization
max_iter = 15     # evaluation budget
max_time = 60     # time budget
eps      = 10e-6  # minimum allowed distance between the last two observations

myBopt.run_optimization(max_iter, max_time, eps)
```

When the optimization is done you should receive a message describing whether the method converged or the maximum number of iterations was reached. In one-dimensional examples, you can see the result of the optimization as follows.

```
myBopt.plot_acquisition()
myBopt.plot_convergence()
```

In problems of any dimension two evaluation plots are available.

* The distance between the last two observations.
* The value of $f$ at the best location previous to each iteration.

To see these plots just run the following cell.

```
myBopt.plot_convergence()
```

Now let's make a video to track what the algorithm is doing in each iteration. Let's use the LCB in this case with parameter equal to 2.

## 4. Two dimensional example

Next, we try a 2-dimensional example. In this case we minimize the Six-hump camel function

$$f(x_1,x_2) = \left(4-2.1x_1^2 + \frac{x_1^4}{3} \right)x_1^2 + x_1x_2 + (-4 +4x_2^2)x_2^2,$$

in $[-3,3]\times [-2,2]$. This function has two global minima, at $(0.0898,-0.7126)$ and $(-0.0898,0.7126)$.
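As a quick sanity check on the formula above, here is a small standalone NumPy sketch (independent of GPyOpt) that evaluates the six-hump camel function at the two stated minima; both should return roughly $-1.0316$.

```
import numpy as np

def six_hump_camel(x1, x2):
    # f(x1, x2) = (4 - 2.1*x1**2 + x1**4/3)*x1**2 + x1*x2 + (-4 + 4*x2**2)*x2**2
    return (4 - 2.1 * x1**2 + x1**4 / 3) * x1**2 + x1 * x2 + (-4 + 4 * x2**2) * x2**2

for x1, x2 in [(0.0898, -0.7126), (-0.0898, 0.7126)]:
    print((x1, x2), six_hump_camel(x1, x2))   # ~ -1.0316 at both global minima
```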
As in the previous case we create the function, which is already included in GPyOpt. In this case we generate observations of the function perturbed with white noise of standard deviation $sd=0.1$.

```
# create the objective function
f_true = GPyOpt.objective_examples.experiments2d.sixhumpcamel()
f_sim = GPyOpt.objective_examples.experiments2d.sixhumpcamel(sd = 0.1)
bounds = [{'name': 'var_1', 'type': 'continuous', 'domain': f_true.bounds[0]},
          {'name': 'var_2', 'type': 'continuous', 'domain': f_true.bounds[1]}]
f_true.plot()
```

We create the GPyOpt object. In this case we use the Expected Improvement (EI) acquisition function to solve the problem.

```
# create the GPyOpt object for the 2D problem
myBopt2D = GPyOpt.methods.BayesianOptimization(f_sim.f,
                                               domain=bounds,
                                               model_type = 'GP',
                                               acquisition_type='EI',
                                               normalize_Y = True,
                                               acquisition_weight = 2)
```

We run the optimization for 40 iterations and show the evaluation plot and the acquisition function.

```
# run the optimization
max_iter = 40   # maximum number of iterations
max_time = 60   # maximum time of 60 seconds

myBopt2D.run_optimization(max_iter, max_time, verbosity=False)
```

Finally, we plot the acquisition function and the convergence plot.

```
myBopt2D.plot_acquisition()
myBopt2D.plot_convergence()
```
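As in the one-dimensional case, the best location found and its value are exposed on the optimization object, using the same attributes introduced in Section 1; because the observations are noisy, the reported values will only be approximately equal to the true optimum.

```
# best location and value found by the 2D run
print(myBopt2D.x_opt)    # should be near (0.0898, -0.7126) or (-0.0898, 0.7126)
print(myBopt2D.fx_opt)   # should be roughly -1.03, up to the observation noise
```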
<a href="https://colab.research.google.com/github/Cloblak/aipi540_deeplearning/blob/main/1D_CNN_Attempts/1D_CNN_asof_111312FEB.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` !pip install alpaca_trade_api ``` Features To Consider - Targets are only predicting sell within market hours, i.e. at 1530, target is prediciting price for 1100 the next day. Data from pre and post market is taken into consideration, and a sell or buy will be indicated if the price will flucuate after close. ``` # Import Dependencies import numpy as np import pandas as pd import torch from torch.utils.data import DataLoader, TensorDataset from torch.autograd import Variable from torch.nn import Linear, ReLU, CrossEntropyLoss, Sequential, Conv2d, MaxPool2d, Module, Softmax, BatchNorm2d, Dropout from torch.optim import Adam, SGD from torch.utils.data import DataLoader, TensorDataset from torch.utils.tensorboard import SummaryWriter from torchsummary import summary import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from tqdm.notebook import tqdm import alpaca_trade_api as tradeapi from datetime import datetime, timedelta, tzinfo, timezone, time import os.path import ast import threading import math import seaborn as sns import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler, MinMaxScaler import warnings random_seed = 182 torch.manual_seed(random_seed) PAPER_API_KEY = "PKE39LILN9SL1FMJMFV7" PAPER_SECRET_KEY = "TkU7fXH6WhP15MewgWlSnQG5RUoHGOPQ7yqlD6xq" PAPER_BASE_URL = 'https://paper-api.alpaca.markets' api = tradeapi.REST(PAPER_API_KEY, PAPER_SECRET_KEY, PAPER_BASE_URL, api_version='v2') def prepost_train_test_validate_offset_data(api, ticker, interval, train_days=180, test_days=60, validate_days=30, offset_days = 0): ticker_data_dict = None ticker_data_dict = {} monthly_data_dict = None monthly_data_dict = {} interval_loop_data = None interval_loop_data = pd.DataFrame() stock_data = None days_to_collect = train_days + test_days + validate_days + offset_days TZ = 'US/Eastern' start = pd.to_datetime((datetime.now() - timedelta(days=days_to_collect)).strftime("%Y-%m-%d %H:%M"), utc=True) end = pd.to_datetime(datetime.now().strftime("%Y-%m-%d %H:%M"), utc=True) stock_data = api.get_bars(ticker, interval, start = start.isoformat(), end=end.isoformat(), adjustment="raw").df interval_loop_data = interval_loop_data.append(stock_data) df_start_ref = interval_loop_data.index[0] start_str_ref = pd.to_datetime(start, utc=True) while start_str_ref.value < ( pd.to_datetime(df_start_ref, utc=True) - pd.Timedelta(days=2.5)).value: end_new = pd.to_datetime(interval_loop_data.index[0].strftime("%Y-%m-%d %H:%M"), utc=True).isoformat() stock_data_new = None stock_data_new = api.get_bars(ticker, interval, start=start, end=end_new, adjustment="raw").df #stock_data_new = stock_data_new.reset_index() interval_loop_data = interval_loop_data.append(stock_data_new).sort_values(by=['index'], ascending=True) df_start_ref = interval_loop_data.index[0] stock_yr_min_df = interval_loop_data.copy() stock_yr_min_df["Open"] = stock_yr_min_df['open'] stock_yr_min_df["High"]= stock_yr_min_df["high"] stock_yr_min_df["Low"] = stock_yr_min_df["low"] stock_yr_min_df["Close"] = stock_yr_min_df["close"] stock_yr_min_df["Volume"] = stock_yr_min_df["volume"] stock_yr_min_df["VolumeWeightedAvgPrice"] = stock_yr_min_df["vwap"] stock_yr_min_df["Time"] = 
stock_yr_min_df.index.tz_convert(TZ) stock_yr_min_df.index = stock_yr_min_df.index.tz_convert(TZ) final_df = stock_yr_min_df.filter(["Time", "Open", "High", "Low", "Close", "Volume", "VolumeWeightedAvgPrice"], axis = 1) first_day = final_df.index[0] traintest_day = final_df.index[-1] - pd.Timedelta(days= test_days+validate_days+offset_days) valtest_day = final_df.index[-1] - pd.Timedelta(days= test_days+offset_days) last_day = final_df.index[-1] - pd.Timedelta(days= offset_days) training_df = final_df.loc[first_day:traintest_day] #(data_split - pd.Timedelta(days=1))] validate_df = final_df.loc[traintest_day:valtest_day] testing_df = final_df.loc[valtest_day:last_day] full_train = final_df.loc[first_day:last_day] offset_df = final_df.loc[last_day:] return training_df, validate_df, testing_df, full_train, offset_df, final_df, traintest_day, valtest_day from datetime import date train_start = date(2017, 1, 1) train_end = date(2020, 3, 29) train_delta = train_end - train_start print(f'Number of days of Training Data {train_delta.days}') val_day_num = 400 print(f'Number of days of Validation Data {val_day_num}') test_start = train_end + timedelta(val_day_num) test_end = date.today() test_delta = (test_end - test_start) print(f'Number of days of Holdout Test Data {test_delta.days}') ticker = "CORN" # Ticker Symbol to Test interval = "5Min" # Interval of bars train_day_int = train_delta.days # Size of training set (Jan 2010 - Oct 2017) val_day_int = val_day_num # Size of validation set test_day_int = test_delta.days # Size of test set offset_day_int = 0 # Number of days to off set the training data train_raw, val_raw, test_raw, full_raw, offset_raw, complete_raw, traintest_day, testval_day = prepost_train_test_validate_offset_data(api, ticker, interval, train_days=train_day_int, test_days=test_day_int, validate_days=val_day_int, offset_days = offset_day_int) def timeFilterAndBackfill(df): """ Prep df to be filled out for each trading day: Time Frame: 0930-1930 Backfilling NaNs Adjusting Volume to Zero if no Trading data is present - Assumption is that there were no trades duing that time We will build over lapping arrays by 30 min to give ourselfs more oppurtunities to predict during a given trading day """ df = df.between_time('07:29','17:29') # intial sorting of data TZ = 'US/Eastern' # define the correct timezone start_dateTime = pd.Timestamp(year = df.index[0].year, month = df.index[0].month, day = df.index[0].day, hour = 7, minute = 25, tz = TZ) end_dateTime = pd.Timestamp(year = df.index[-1].year, month = df.index[-1].month, day = df.index[-1].day, hour = 17, minute = 35, tz = TZ) # build blank index that has ever 5 min interval represented dateTime_index = pd.date_range(start_dateTime, end_dateTime, freq='5min').tolist() dateTime_index_df = pd.DataFrame() dateTime_index_df["Time"] = dateTime_index filtered_df = pd.merge_asof(dateTime_index_df, df, on='Time').set_index("Time").between_time('09:29','17:29') # create the close array by back filling NA, to represent no change in close closeset_list = [] prev_c = None for c in filtered_df["Close"]: if prev_c == None: if math.isnan(c): prev_c = 0 closeset_list.append(0) else: prev_c = c closeset_list.append(c) elif prev_c != None: if c == prev_c: closeset_list.append(c) elif math.isnan(c): closeset_list.append(prev_c) else: closeset_list.append(c) prev_c = c filtered_df["Close"] = closeset_list # create the volume volumeset_list = [] prev_v = None for v in filtered_df["Volume"]: if prev_v == None: if math.isnan(v): prev_v = 0 
volumeset_list.append(0) else: prev_v = v volumeset_list.append(v) elif prev_v != None: if v == prev_v: volumeset_list.append(0) prev_v = v elif math.isnan(v): volumeset_list.append(0) prev_v = 0 else: volumeset_list.append(v) prev_v = v filtered_df["Volume"] = volumeset_list adjvolumeset_list = [] prev_v = None for v in filtered_df["VolumeWeightedAvgPrice"]: if prev_v == None: if math.isnan(v): prev_v = 0 adjvolumeset_list.append(0) else: prev_v = v adjvolumeset_list.append(v) elif prev_v != None: if v == prev_v: adjvolumeset_list.append(0) prev_v = v elif math.isnan(v): adjvolumeset_list.append(0) prev_v = 0 else: adjvolumeset_list.append(v) prev_v = v filtered_df["VolumeWeightedAvgPrice"] = adjvolumeset_list preped_df = filtered_df.backfill() return preped_df train_raw[275:300] def buildTargets_VolOnly(full_df = full_raw, train_observations = train_raw.shape[0], val_observations = val_raw.shape[0], test_observations = test_raw.shape[0], alph = .55, volity_int = 10): """ This function will take a complete set of train, val, and test data and return the targets. Volitility will be calculated over the 252 5min incriments The Target shift is looking at 2 hours shift from current time """ returns = np.log(full_df['Close']/(full_df['Close'].shift())) returns.fillna(0, inplace=True) volatility = returns.rolling(window=(volity_int)).std()*np.sqrt(volity_int) return volatility #return train_targets, val_targets, test_targets, full_targets volatility = buildTargets_VolOnly() fig = plt.figure(figsize=(15, 7)) ax1 = fig.add_subplot(1, 1, 1) volatility.plot(ax=ax1, color = "red") ax1.set_xlabel('Date') ax1.set_ylabel('Volatility', color = "red") ax1.set_title(f'Annualized volatility for {ticker}') ax2 = ax1.twinx() full_raw.Close.plot(ax=ax2, color = "blue") ax2.set_ylabel('Close', color = "blue") ax2.axvline(x=full_raw.index[train_raw.shape[0]]) ax2.axvline(x=full_raw.index[val_raw.shape[0]+train_raw.shape[0]]) plt.show() train = timeFilterAndBackfill(train_raw) val = timeFilterAndBackfill(val_raw) test = timeFilterAndBackfill(test_raw) train = train[train.index.dayofweek <= 4].copy() val = val[val.index.dayofweek <= 4].copy() test = test[test.index.dayofweek <= 4].copy() train["Open"] = np.where((train["Volume"] == 0), train["Close"], train["Open"]) train["High"] = np.where((train["Volume"] == 0), train["Close"], train["High"]) train["Low"] = np.where((train["Volume"] == 0), train["Close"], train["Low"]) val["Open"] = np.where((val["Volume"] == 0), val["Close"], val["Open"]) val["High"] = np.where((val["Volume"] == 0), val["Close"], val["High"]) val["Low"] = np.where((val["Volume"] == 0), val["Close"], val["Low"]) test["Open"] = np.where((test["Volume"] == 0), test["Close"], test["Open"]) test["High"] = np.where((test["Volume"] == 0), test["Close"], test["High"]) test["Low"] = np.where((test["Volume"] == 0), test["Close"], test["Low"]) def strided_axis0(a, L, overlap=1): if L==overlap: raise Exception("Overlap arg must be smaller than length of windows") S = L - overlap nd0 = ((len(a)-L)//S)+1 if nd0*S-S!=len(a)-L: warnings.warn("Not all elements were covered") m,n = a.shape s0,s1 = a.strides return np.lib.stride_tricks.as_strided(a, shape=(nd0,L,n), strides=(S*s0,s0,s1)) # OLDER CODE WITHOUT OVERLAP OF LABELING # def blockshaped(arr, nrows, ncols): # """ # Return an array of shape (n, nrows, ncols) where # n * nrows * ncols = arr.size # If arr is a 2D array, the returned array should look like n subblocks with # each subblock preserving the "physical" layout of arr. 
# """ # h, w = arr.shape # assert h % nrows == 0, f"{h} rows is not evenly divisible by {nrows}" # assert w % ncols == 0, f"{w} cols is not evenly divisible by {ncols}" # return np.flip(np.rot90((arr.reshape(h//nrows, nrows, -1, ncols) # .swapaxes(1,2) # .reshape(-1, nrows, ncols)), axes = (1, 2)), axis = 1) def blockshaped(arr, nrows, ncols, overlapping_5min_intervals = 12): """ Return an array of shape (n, nrows, ncols) where n * nrows * ncols = arr.size If arr is a 2D array, the returned array should look like n subblocks with each subblock preserving the "physical" layout of arr. """ h, w = arr.shape assert h % nrows == 0, f"{h} rows is not evenly divisible by {nrows}" assert w % ncols == 0, f"{w} cols is not evenly divisible by {ncols}" return np.flip(np.rot90((strided_axis0(arr, 24, overlap=overlapping_5min_intervals).reshape(-1, nrows, ncols)), axes = (1, 2)), axis = 1) train_tonp = train[["Open", "High", "Low", "Close", "Volume"]] val_tonp = val[["Open", "High", "Low", "Close", "Volume"]] test_tonp = test[["Open", "High", "Low", "Close", "Volume"]] train_array = train_tonp.to_numpy() val_array = val_tonp.to_numpy() test_array = test_tonp.to_numpy() X_train_pre_final = blockshaped(train_array, 24, 5, overlapping_5min_intervals = 12) X_val_pre_final = blockshaped(val_array, 24, 5, overlapping_5min_intervals = 12) X_test_pre_final = blockshaped(test_array, 24, 5, overlapping_5min_intervals = 12) # X_train_pre_final = blockshaped(train_array, 24, 5) # X_val_pre_final = blockshaped(val_array, 24, 5) # X_test_pre_final = blockshaped(test_array, 24, 5) X_train_pre_final[0] # create target from OHLC and Volume Data def buildTargets(obs_array, alph = .55, volity_int = 10): """ This function will take a complete set of train, val, and test data and return the targets. Volitility will be calculated over the 24 5min incriments. The Target shift is looking at 2 hours shift from current time shift_2hour = The amount of time the data interval take to equal 2 hours (i.e. 
5 min data interval is equal to 24) alph = The alpha value for calculating the shift in price volity_int = the number of incriments used to calculate volitility """ target_close_list =[] for arr in obs_array: target_close_list.append(arr[3][-1]) target_close_df = pd.DataFrame() target_close_df["Close"] = target_close_list target_close_df["Volitility"] = target_close_df["Close"].rolling(volity_int).std() # print(len(volatility), len(target_close_df["Close"])) targets = [2] * len(target_close_df.Close) targets = np.where(target_close_df.Close.shift() >= (target_close_df.Close * (1 + alph * target_close_df["Volitility"])), 1, targets) targets = np.where(target_close_df.Close.shift() <= (target_close_df.Close * (1 - alph * target_close_df["Volitility"])), 0, targets) return targets volity_val = 10 alph = .015 y_train_pre_final = buildTargets(X_train_pre_final, alph=alph, volity_int = volity_val) y_val_pre_final = buildTargets(X_val_pre_final, alph=alph, volity_int = volity_val) y_test_pre_final = buildTargets(X_test_pre_final, alph=alph, volity_int = volity_val) def get_class_distribution(obj): count_dict = { "up": 0, "flat": 0, "down": 0, } for i in obj: if i == 1: count_dict['up'] += 1 elif i == 0: count_dict['down'] += 1 elif i == 2: count_dict['flat'] += 1 else: print("Check classes.") return count_dict bfig, axes = plt.subplots(nrows=1, ncols=3, figsize=(25,7)) # Train sns.barplot(data = pd.DataFrame.from_dict([get_class_distribution(y_train_pre_final)]).melt(), x = "variable", y="value", hue="variable", ax=axes[0]).set_title('Class Distribution in Train Set') # Validation sns.barplot(data = pd.DataFrame.from_dict([get_class_distribution(y_val_pre_final)]).melt(), x = "variable", y="value", hue="variable", ax=axes[1]).set_title('Class Distribution in Val Set') # Test sns.barplot(data = pd.DataFrame.from_dict([get_class_distribution(y_test_pre_final)]).melt(), x = "variable", y="value", hue="variable", ax=axes[2]).set_title('Class Distribution in Test Set') def createFinalData_RemoveLateAfternoonData(arr, labels): assert arr.shape[0] == len(labels), "X data do not match length of y labels" step_count = 0 filtered_y_labels = [] for i in range(arr.shape[0]): if i == 0: final_arr = arr[i] filtered_y_labels.append(labels[i]) #print(f'Appending index {i}, step_count: {step_count}') step_count += 1 elif i == 1: final_arr = np.stack((final_arr, arr[i])) filtered_y_labels.append(labels[i]) step_count += 1 elif step_count == 0: final_arr = np.vstack((final_arr, arr[i][None])) filtered_y_labels.append(labels[i]) #print(f'Appending index {i}, step_count: {step_count}') step_count += 1 elif (step_count) % 5 == 0: #print(f'skipping {i} array, step_count: {step_count}') step_count += 1 elif (step_count) % 6 == 0: #print(f'skipping {i} array, step_count: {step_count}') step_count += 1 elif (step_count) % 7 == 0: #print(f'skipping {i} array, step_count: {step_count}') step_count = 0 else: final_arr = np.vstack((final_arr, arr[i][None])) filtered_y_labels.append(labels[i]) #print(f'Appending index {i}, step_count: {step_count}') step_count += 1 return final_arr, filtered_y_labels X_train, y_train = createFinalData_RemoveLateAfternoonData(X_train_pre_final, y_train_pre_final) X_val, y_val = createFinalData_RemoveLateAfternoonData(X_val_pre_final, y_val_pre_final) X_test, y_test = createFinalData_RemoveLateAfternoonData(X_test_pre_final, y_test_pre_final) y_train = np.array(y_train) y_val = np.array(y_val) y_test = np.array(y_test) # Check it arrays are made correctly train[12:48] 
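# ---------------------------------------------------------------------------
# Quick sanity check on the prepared arrays (descriptive summary only, no new logic):
# - each X window holds 5 features (Open, High, Low, Close, Volume) over 24
#   five-minute bars (~2 hours), built in blockshaped/strided_axis0 above with a
#   12-bar stride, so consecutive raw windows overlap by roughly one hour
# - y labels follow get_class_distribution's encoding (0 = down, 1 = up, 2 = flat),
#   assigned in buildTargets via the alph * rolling-volatility threshold
print(X_train.shape, y_train.shape)
print(np.bincount(y_train))   # class counts for down / up / flat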
np.set_printoptions(threshold=200) y_train_pre_final[0:24] ###### # Code fro scaling at a later date ###### # from sklearn.preprocessing import MinMaxScaler scalers = {} for i in range(X_train.shape[1]): scalers[i] = MinMaxScaler() X_train[:, i, :] = scalers[i].fit_transform(X_train[:, i, :]) for i in range(X_val.shape[1]): scalers[i] = MinMaxScaler() X_val[:, i, :] = scalers[i].fit_transform(X_val[:, i, :]) for i in range(X_test.shape[1]): scalers[i] = MinMaxScaler() X_test[:, i, :] = scalers[i].fit_transform(X_test[:, i, :]) def get_class_distribution(obj): count_dict = { "up": 0, "flat": 0, "down": 0, } for i in obj: if i == 1: count_dict['up'] += 1 elif i == 0: count_dict['down'] += 1 elif i == 2: count_dict['flat'] += 1 else: print("Check classes.") return count_dict bfig, axes = plt.subplots(nrows=1, ncols=3, figsize=(25,7)) # Train sns.barplot(data = pd.DataFrame.from_dict([get_class_distribution(y_train)]).melt(), x = "variable", y="value", hue="variable", ax=axes[0]).set_title('Class Distribution in Train Set') # Validation sns.barplot(data = pd.DataFrame.from_dict([get_class_distribution(y_val)]).melt(), x = "variable", y="value", hue="variable", ax=axes[1]).set_title('Class Distribution in Val Set') # Test sns.barplot(data = pd.DataFrame.from_dict([get_class_distribution(y_test)]).melt(), x = "variable", y="value", hue="variable", ax=axes[2]).set_title('Class Distribution in Test Set') ###### ONLY EXECUTE FOR 2D CNN ##### X_train = X_train.reshape(X_train.shape[0], 1, X_train.shape[1], X_train.shape[2]) X_val = X_val.reshape(X_val.shape[0], 1, X_val.shape[1], X_val.shape[2]) X_test = X_test.reshape(X_test.shape[0], 1, X_test.shape[1], X_test.shape[2]) print(f'X Train Length {X_train.shape}, y Train Label Length {y_train.shape}') print(f'X Val Length {X_val.shape}, y Val Label Length {y_val.shape}') print(f'X Test Length {X_test.shape}, y Test Label Length {y_test.shape}') ``` # 2D CNN Build Model ``` trainset = TensorDataset(torch.from_numpy(X_train).float(), torch.from_numpy(y_train).long()) valset = TensorDataset(torch.from_numpy(X_val).float(), torch.from_numpy(y_val).long()) testset = TensorDataset(torch.from_numpy(X_test).float(), torch.from_numpy(y_test).long()) trainset batch_size = 1 # train_data = [] # for i in range(len(X_train)): # train_data.append([X_train[i].astype('float'), y_train[i]]) train_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=False) i1, l1 = next(iter(train_loader)) print(i1.shape) # val_data = [] # for i in range(len(X_val)): # val_data.append([X_val[i].astype('float'), y_val[i]]) val_loader = torch.utils.data.DataLoader(valset, batch_size=batch_size, shuffle=False) i1, l1 = next(iter(val_loader)) print(i1.shape) test_loader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False) i1, l1 = next(iter(test_loader)) print(i1.shape) # Get next batch of training images windows, labels = iter(train_loader).next() print(windows) windows = windows.numpy() # plot the windows in the batch, along with the corresponding labels for idx in range(batch_size): print(labels[idx]) # Set up dict for dataloaders dataloaders = {'train':train_loader,'val':val_loader} # Store size of training and validation sets dataset_sizes = {'train':len(trainset),'val':len(valset)} # Get class names associated with labels classes = [0,1,2] class StockShiftClassification(nn.Module): def __init__(self): super(StockShiftClassification, self).__init__() self.conv1 = nn.Conv2d(1, 32, kernel_size = (1,3), stride=1, padding = 1) self.pool1 = 
nn.MaxPool2d((1,4),4) self.conv2 = nn.Conv2d(32, 64, kernel_size = (1,3), stride=1, padding = 1) self.pool2 = nn.MaxPool2d((1,3),3) self.conv3 = nn.Conv2d(64, 128, kernel_size = (1,3), stride=1, padding = 1) self.pool3 = nn.MaxPool2d((1,2),2) self.fc1 = nn.Linear(256,1000) #calculate this self.fc2 = nn.Linear(1000, 500) #self.fc3 = nn.Linear(500, 3) def forward(self, x): x = F.relu(self.conv1(x)) x = self.pool1(x) x = F.relu(self.conv2(x)) x = self.pool2(x) x = F.relu(self.conv3(x)) x = self.pool3(x) #print(x.size(1)) x = x.view(x.size(0), -1) # Linear layer x = self.fc1(x) x = self.fc2(x) #x = self.fc3(x) output = x#F.softmax(x, dim=1) return output # Instantiate the model net = StockShiftClassification().float() # Display a summary of the layers of the model and output shape after each layer summary(net,(windows.shape[1:]),batch_size=batch_size,device="cpu") def train_model(model, criterion, optimizer, train_loaders, device, num_epochs=50, scheduler=onecycle_scheduler): model = model.to(device) # Send model to GPU if available writer = SummaryWriter() # Instantiate TensorBoard iter_num = {'train':0,'val':0} # Track total number of iterations for epoch in range(num_epochs): print('Epoch {}/{}'.format(epoch, num_epochs - 1)) print('-' * 10) # Each epoch has a training and validation phase for phase in ['train', 'val']: if phase == 'train': model.train() # Set model to training mode else: model.eval() # Set model to evaluate mode running_loss = 0.0 running_corrects = 0 # Get the input images and labels, and send to GPU if available for inputs, labels in dataloaders[phase]: inputs = inputs.to(device) labels = labels.to(device) # Zero the weight gradients optimizer.zero_grad() # Forward pass to get outputs and calculate loss # Track gradient only for training data with torch.set_grad_enabled(phase == 'train'): outputs = model(inputs) # print(outputs) _, preds = torch.max(outputs, 1) loss = criterion(outputs, labels) # Backpropagation to get the gradients with respect to each weight # Only if in train if phase == 'train': loss.backward() # Update the weights optimizer.step() # Convert loss into a scalar and add it to running_loss running_loss += loss.item() * inputs.size(0) # Track number of correct predictions running_corrects += torch.sum(preds == labels.data) # Iterate count of iterations iter_num[phase] += 1 # Write loss for batch to TensorBoard writer.add_scalar("{} / batch loss".format(phase), loss.item(), iter_num[phase]) # scheduler.step() # Calculate and display average loss and accuracy for the epoch epoch_loss = running_loss / dataset_sizes[phase] epoch_acc = running_corrects.double() / dataset_sizes[phase] print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc)) # Write loss and accuracy for epoch to TensorBoard writer.add_scalar("{} / epoch loss".format(phase), epoch_loss, epoch) writer.add_scalar("{} / epoch accuracy".format(phase), epoch_acc, epoch) writer.close() return # Train the model device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # Cross entropy loss combines softmax and nn.NLLLoss() in one single class. weights = torch.tensor([1.5, 2.25, 1.]).to(device) criterion_weighted = nn.CrossEntropyLoss(weight=weights) criterion = nn.CrossEntropyLoss() # Define optimizer #optimizer = optim.SGD(net.parameters(), lr=0.001) optimizer = optim.Adam(net.parameters(), lr=0.001, weight_decay=0.00001) n_epochs= 10 # For demo purposes. 
Use epochs>100 for actual training onecycle_scheduler = optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, base_momentum = 0.8, steps_per_epoch=len(train_loader), epochs=n_epochs) train_model(net, criterion, optimizer, dataloaders, device, num_epochs=n_epochs) #, scheduler=onecycle_scheduler) def test_model(model,val_loader,device): # Turn autograd off with torch.no_grad(): # Set the model to evaluation mode model = model.to(device) model.eval() # Set up lists to store true and predicted values y_true = [] test_preds = [] # Calculate the predictions on the test set and add to list for data in val_loader: inputs, labels = data[0].to(device), data[1].to(device) # Feed inputs through model to get raw scores logits = model.forward(inputs) #print(f'Logits: {logits}') # Convert raw scores to probabilities (not necessary since we just care about discrete probs in this case) probs = F.log_softmax(logits, dim=1) #print(f'Probs after LogSoft: {probs}') # Get discrete predictions using argmax preds = np.argmax(probs.cpu().numpy(),axis=1) # Add predictions and actuals to lists test_preds.extend(preds) y_true.extend(labels) # Calculate the accuracy test_preds = np.array(test_preds) y_true = np.array(y_true) test_acc = np.sum(test_preds == y_true)/y_true.shape[0] # Recall for each class recall_vals = [] for i in range(3): class_idx = np.argwhere(y_true==i) total = len(class_idx) correct = np.sum(test_preds[class_idx]==i) recall = correct / total recall_vals.append(recall) return test_acc, recall_vals # Calculate the test set accuracy and recall for each class acc,recall_vals = test_model(net,val_loader,device) print('Test set accuracy is {:.3f}'.format(acc)) for i in range(3): print('For class {}, recall is {}'.format(classes[i],recall_vals[i])) from sklearn.metrics import confusion_matrix def plot_confusion_matrix(cm, target_names, title='Confusion matrix', cmap=None, normalize=True): """ given a sklearn confusion matrix (cm), make a nice plot Arguments --------- cm: confusion matrix from sklearn.metrics.confusion_matrix target_names: given classification classes such as [0, 1, 2] the class names, for example: ['high', 'medium', 'low'] title: the text to display at the top of the matrix cmap: the gradient of the values displayed from matplotlib.pyplot.cm see http://matplotlib.org/examples/color/colormaps_reference.html plt.get_cmap('jet') or plt.cm.Blues normalize: If False, plot the raw numbers If True, plot the proportions Usage ----- plot_confusion_matrix(cm = cm, # confusion matrix created by # sklearn.metrics.confusion_matrix normalize = True, # show proportions target_names = y_labels_vals, # list of names of the classes title = best_estimator_name) # title of graph Citiation --------- http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html """ import matplotlib.pyplot as plt import numpy as np import itertools accuracy = np.trace(cm) / np.sum(cm).astype('float') misclass = 1 - accuracy if cmap is None: cmap = plt.get_cmap('Blues') plt.figure(figsize=(8, 6)) plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() if target_names is not None: tick_marks = np.arange(len(target_names)) plt.xticks(tick_marks, target_names, rotation=45) plt.yticks(tick_marks, target_names) if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] thresh = cm.max() / 1.5 if normalize else cm.max() / 2 for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): if normalize: plt.text(j, i, "{:0.4f}".format(cm[i, j]), 
horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") else: plt.text(j, i, "{:,}".format(cm[i, j]), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass)) plt.show() nb_classes = 9 # Initialize the prediction and label lists(tensors) predlist=torch.zeros(0,dtype=torch.long, device='cpu') lbllist=torch.zeros(0,dtype=torch.long, device='cpu') with torch.no_grad(): for i, (inputs, classes) in enumerate(dataloaders['val']): inputs = inputs.to(device) classes = classes.to(device) outputs = net.forward(inputs) _, preds = torch.max(outputs, 1) # Append batch prediction results predlist=torch.cat([predlist,preds.view(-1).cpu()]) lbllist=torch.cat([lbllist,classes.view(-1).cpu()]) # Confusion matrix conf_mat=confusion_matrix(lbllist.numpy(), predlist.numpy()) plot_confusion_matrix(conf_mat, [0,1,2]) from sklearn.metrics import precision_score precision_score(lbllist.numpy(), predlist.numpy(), average='weighted') from sklearn.metrics import classification_report print(classification_report(lbllist.numpy(), predlist.numpy(), target_names=["down","up","flat"], digits=4)) train_x = torch.from_numpy(X_train).float() train_y = torch.from_numpy(y_train).long() val_x = torch.from_numpy(X_val).float() val_y = torch.from_numpy(y_val).long() # defining the model model = net # defining the optimizer optimizer = Adam(model.parameters(), lr=0.07) # defining the loss function criterion = CrossEntropyLoss() # checking if GPU is available if torch.cuda.is_available(): model = model.cuda() criterion = criterion.cuda() from torch.autograd import Variable def train(epoch, train_x, train_y, val_x, val_y): model.train() tr_loss = 0 # getting the training set x_train, y_train = Variable(train_x), Variable(train_y) # getting the validation set x_val, y_val = Variable(val_x), Variable(val_y) # converting the data into GPU format if torch.cuda.is_available(): x_train = x_train.cuda() y_train = y_train.cuda() x_val = x_val.cuda() y_val = y_val.cuda() # clearing the Gradients of the model parameters optimizer.zero_grad() # prediction for training and validation set output_train = model(x_train) output_val = model(x_val) # computing the training and validation loss loss_train = criterion(output_train, y_train) loss_val = criterion(output_val, y_val) train_losses.append(loss_train) val_losses.append(loss_val) # computing the updated weights of all the model parameters loss_train.backward() optimizer.step() tr_loss = loss_train.item() if epoch%2 == 0: # printing the validation loss print('Epoch : ',epoch+1, '\t', 'loss :', loss_val) # defining the number of epochs n_epochs = 100 # empty list to store training losses train_losses = [] # empty list to store validation losses val_losses = [] # training the model for epoch in range(n_epochs): train(epoch, X_train, y_train, X_val, y_val) # plotting the training and validation loss plt.plot(train_losses, label='Training loss') plt.plot(val_losses, label='Validation loss') plt.legend() plt.show() from sklearn.metrics import accuracy_score from tqdm import tqdm with torch.no_grad(): output = model(X_val.cuda()) softmax = torch.exp(output).cpu() prob = list(softmax.numpy()) predictions = np.argmax(prob, axis=1) # accuracy on training set accuracy_score(y_val, predictions) # defining the number of epochs n_epochs = 25 # empty list to store training losses train_losses = [] # empty list to store validation 
losses val_losses = [] # training the model for epoch in range(n_epochs): train(epoch) def train_model(model, criterion, optimizer, train_loaders, device, num_epochs=50): #, scheduler=onecycle_scheduler): model = model.to(device) # Send model to GPU if available writer = SummaryWriter() # Instantiate TensorBoard iter_num = {'train':0,'val':0} # Track total number of iterations for epoch in range(num_epochs): print('Epoch {}/{}'.format(epoch, num_epochs - 1)) print('-' * 10) # Each epoch has a training and validation phase for phase in ['train', 'val']: if phase == 'train': model.train() # Set model to training mode else: model.eval() # Set model to evaluate mode running_loss = 0.0 running_corrects = 0 # Get the input images and labels, and send to GPU if available for inputs, labels in dataloaders[phase]: inputs = inputs.to(device) labels = labels.to(device) # Zero the weight gradients optimizer.zero_grad() # Forward pass to get outputs and calculate loss # Track gradient only for training data with torch.set_grad_enabled(phase == 'train'): outputs = model(inputs) _, preds = torch.max(outputs, 1) loss = criterion(outputs, labels) # Backpropagation to get the gradients with respect to each weight # Only if in train if phase == 'train': loss.backward() # Update the weights optimizer.step() # Convert loss into a scalar and add it to running_loss running_loss += loss.item() * inputs.size(0) # Track number of correct predictions running_corrects += torch.sum(preds == labels.data) # Iterate count of iterations iter_num[phase] += 1 # Write loss for batch to TensorBoard writer.add_scalar("{} / batch loss".format(phase), loss.item(), iter_num[phase]) # scheduler.step() # Calculate and display average loss and accuracy for the epoch epoch_loss = running_loss / dataset_sizes[phase] epoch_acc = running_corrects.double() / dataset_sizes[phase] print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc)) # Write loss and accuracy for epoch to TensorBoard writer.add_scalar("{} / epoch loss".format(phase), epoch_loss, epoch) writer.add_scalar("{} / epoch accuracy".format(phase), epoch_acc, epoch) writer.close() return # Train the model device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # Cross entropy loss combines softmax and nn.NLLLoss() in one single class. weights = torch.tensor([1.75, 2.25, 1.]).to(device) criterion_weighted = nn.CrossEntropyLoss(weight=weights) criterion = nn.CrossEntropyLoss() # Define optimizer #optimizer = optim.SGD(net.parameters(), lr=0.001) optimizer = optim.Adam(net.parameters(), lr=0.001, weight_decay=0.00001) n_epochs= 10 # For demo purposes. 
Use epochs>100 for actual training # onecycle_scheduler = optim.lr_scheduler.OneCycleLR(optimizer, # max_lr=0.01, # base_momentum = 0.8, # steps_per_epoch=len(train_loader), # epochs=n_epochs) train_model(net, criterion, optimizer, dataloaders, device, num_epochs=n_epochs) #, scheduler=onecycle_scheduler) def test_model(model,val_loader,device): # Turn autograd off with torch.no_grad(): # Set the model to evaluation mode model.eval() # Set up lists to store true and predicted values y_true = [] test_preds = [] # Calculate the predictions on the test set and add to list for data in val_loader: inputs, labels = data[0].to(device), data[1].to(device) # Feed inputs through model to get raw scores logits = model.forward(inputs) #print(f'Logits: {logits}') # Convert raw scores to probabilities (not necessary since we just care about discrete probs in this case) probs = F.softmax(logits, dim=0) # print(f'Probs after LogSoft: {probs}') # Get discrete predictions using argmax preds = np.argmax(probs.cpu().numpy(),axis=1) # Add predictions and actuals to lists test_preds.extend(preds) y_true.extend(labels) # Calculate the accuracy test_preds = np.array(test_preds) y_true = np.array(y_true) test_acc = np.sum(test_preds == y_true)/y_true.shape[0] # Recall for each class recall_vals = [] for i in range(2): class_idx = np.argwhere(y_true==i) total = len(class_idx) correct = np.sum(test_preds[class_idx]==i) recall = correct / total recall_vals.append(recall) return test_acc, recall_vals # Calculate the test set accuracy and recall for each class acc,recall_vals = test_model(net,test_loader,device) print('Test set accuracy is {:.3f}'.format(acc)) for i in range(2): print('For class {}, recall is {}'.format(classes[i],recall_vals[i])) import time def train(model, optimizer, loss_fn, train_dl, val_dl, epochs=100, device='cpu'): print('train() called: model=%s, opt=%s(lr=%f), epochs=%d, device=%s\n' % \ (type(model).__name__, type(optimizer).__name__, optimizer.param_groups[0]['lr'], epochs, device)) history = {} # Collects per-epoch loss and acc like Keras' fit(). 
history['loss'] = [] history['val_loss'] = [] history['acc'] = [] history['val_acc'] = [] start_time_sec = time.time() for epoch in range(1, epochs+1): # --- TRAIN AND EVALUATE ON TRAINING SET ----------------------------- model.train() train_loss = 0.0 num_train_correct = 0 num_train_examples = 0 for batch in train_dl: optimizer.zero_grad() x = batch[0].to(device) y = batch[1].to(device) yhat = model(x) loss = loss_fn(yhat, y) loss.backward() optimizer.step() train_loss += loss.data.item() * x.size(0) num_train_correct += (torch.max(yhat, 1)[1] == y).sum().item() num_train_examples += x.shape[0] train_acc = num_train_correct / num_train_examples train_loss = train_loss / len(train_dl.dataset) # --- EVALUATE ON VALIDATION SET ------------------------------------- model.eval() val_loss = 0.0 num_val_correct = 0 num_val_examples = 0 for batch in val_dl: x = batch[0].to(device) y = batch[1].to(device) yhat = model(x) loss = loss_fn(yhat, y) val_loss += loss.data.item() * x.size(0) num_val_correct += (torch.max(yhat, 1)[1] == y).sum().item() num_val_examples += y.shape[0] val_acc = num_val_correct / num_val_examples val_loss = val_loss / len(val_dl.dataset) if epoch == 1 or epoch % 10 == 0: print('Epoch %3d/%3d, train loss: %5.2f, train acc: %5.2f, val loss: %5.2f, val acc: %5.2f' % \ (epoch, epochs, train_loss, train_acc, val_loss, val_acc)) history['loss'].append(train_loss) history['val_loss'].append(val_loss) history['acc'].append(train_acc) history['val_acc'].append(val_acc) # END OF TRAINING LOOP end_time_sec = time.time() total_time_sec = end_time_sec - start_time_sec time_per_epoch_sec = total_time_sec / epochs print() print('Time total: %5.2f sec' % (total_time_sec)) print('Time per epoch: %5.2f sec' % (time_per_epoch_sec)) return history y_flat_num = y_train[np.where(y_train == 2)].size y_down_weight = round((y_flat_num / y_train[np.where(y_train == 0)].size) * 1.2, 3) y_up_weight = round((y_flat_num / y_train[np.where(y_train == 1)].size) * 1.5, 3) print(y_down_weight, y_up_weight, 1) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model = net.to(device) criterion = nn.CrossEntropyLoss() # weights = torch.tensor([y_down_weight, y_up_weight, 1.]).to(device) # criterion_weighted = nn.CrossEntropyLoss(weight=weights) optimizer = torch.optim.Adam(net.parameters(), lr = 0.001, weight_decay=0.00001) epochs = 20 history = train( model = model, optimizer = optimizer, loss_fn = criterion, train_dl = train_loader, val_dl = test_loader, epochs=epochs, device=device) import matplotlib.pyplot as plt acc = history['acc'] val_acc = history['val_acc'] loss = history['loss'] val_loss = history['val_loss'] epochs = range(1, len(acc) + 1) plt.plot(epochs, acc, 'b', label='Training acc') plt.plot(epochs, val_acc, 'r', label='Validation acc') plt.title('Training and validation accuracy') plt.legend() plt.figure() plt.plot(epochs, loss, 'b', label='Training loss') plt.plot(epochs, val_loss, 'r', label='Validation loss') plt.title('Training and validation loss') plt.legend() plt.show() def test_model(model,val_loader,device): # Turn autograd off with torch.no_grad(): # Set the model to evaluation mode model = model.to(device) model.eval() # Set up lists to store true and predicted values y_true = [] test_preds = [] # Calculate the predictions on the test set and add to list for data in val_loader: inputs, labels = data[0].to(device), data[1].to(device) # Feed inputs through model to get raw scores logits = model.forward(inputs) #print(f'Logits: {logits}') # Convert raw scores to 
probabilities (not necessary since we just care about discrete probs in this case) probs = F.softmax(logits) # print(f'Probs after LogSoft: {probs}') # Get discrete predictions using argmax preds = np.argmax(probs.cpu().numpy(),axis=1) # Add predictions and actuals to lists test_preds.extend(preds) y_true.extend(labels) # Calculate the accuracy test_preds = np.array(test_preds) y_true = np.array(y_true) test_acc = np.sum(test_preds == y_true)/y_true.shape[0] # Recall for each class recall_vals = [] for i in range(2): class_idx = np.argwhere(y_true==i) total = len(class_idx) correct = np.sum(test_preds[class_idx]==i) recall = correct / total recall_vals.append(recall) return test_acc, recall_vals # Calculate the test set accuracy and recall for each class acc,recall_vals = test_model(model,test_loader,device) print('Test set accuracy is {:.3f}'.format(acc)) for i in range(2): print('For class {}, recall is {}'.format(classes[i],recall_vals[i])) from sklearn.metrics import confusion_matrix def plot_confusion_matrix(cm, target_names, title='Confusion matrix', cmap=None, normalize=True): """ given a sklearn confusion matrix (cm), make a nice plot Arguments --------- cm: confusion matrix from sklearn.metrics.confusion_matrix target_names: given classification classes such as [0, 1, 2] the class names, for example: ['high', 'medium', 'low'] title: the text to display at the top of the matrix cmap: the gradient of the values displayed from matplotlib.pyplot.cm see http://matplotlib.org/examples/color/colormaps_reference.html plt.get_cmap('jet') or plt.cm.Blues normalize: If False, plot the raw numbers If True, plot the proportions Usage ----- plot_confusion_matrix(cm = cm, # confusion matrix created by # sklearn.metrics.confusion_matrix normalize = True, # show proportions target_names = y_labels_vals, # list of names of the classes title = best_estimator_name) # title of graph Citiation --------- http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html """ import matplotlib.pyplot as plt import numpy as np import itertools accuracy = np.trace(cm) / np.sum(cm).astype('float') misclass = 1 - accuracy if cmap is None: cmap = plt.get_cmap('Blues') plt.figure(figsize=(8, 6)) plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() if target_names is not None: tick_marks = np.arange(len(target_names)) plt.xticks(tick_marks, target_names, rotation=45) plt.yticks(tick_marks, target_names) if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] thresh = cm.max() / 1.5 if normalize else cm.max() / 2 for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): if normalize: plt.text(j, i, "{:0.4f}".format(cm[i, j]), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") else: plt.text(j, i, "{:,}".format(cm[i, j]), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass)) plt.show() nb_classes = 2 # Initialize the prediction and label lists(tensors) predlist=torch.zeros(0,dtype=torch.long, device='cpu') lbllist=torch.zeros(0,dtype=torch.long, device='cpu') with torch.no_grad(): for i, (inputs, classes) in enumerate(dataloaders['val']): # print(inputs) inputs = inputs.to(device) classes = classes.to(device) outputs = model.forward(inputs) #print(outputs) _, preds = torch.max(outputs, 1) # Append batch prediction results 
predlist=torch.cat([predlist,preds.view(-1).cpu()]) lbllist=torch.cat([lbllist,classes.view(-1).cpu()]) # Confusion matrix conf_mat=confusion_matrix(lbllist.numpy(), predlist.numpy()) plot_confusion_matrix(conf_mat, [0,1]) from sklearn.metrics import precision_score precision_score(lbllist.numpy(), predlist.numpy(), average='weighted') from sklearn.metrics import classification_report print(classification_report(lbllist.numpy(), predlist.numpy(), target_names=["down","up"], digits=4)) ```
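As a cross-check on the weighted precision and classification report above, per-class precision and recall can also be read directly off the confusion matrix. The following is a minimal sketch, assuming the `conf_mat` computed above (rows are true labels, columns are predicted labels):

```
import numpy as np

def per_class_metrics(cm):
    """Compute per-class precision and recall from a confusion matrix
    whose rows are true labels and whose columns are predicted labels."""
    cm = np.asarray(cm, dtype=float)
    tp = np.diag(cm)                                  # correctly classified counts per class
    precision = tp / np.maximum(cm.sum(axis=0), 1)    # column sums = predicted counts per class
    recall = tp / np.maximum(cm.sum(axis=1), 1)       # row sums = true counts per class
    return precision, recall

precision, recall = per_class_metrics(conf_mat)
print("precision per class:", precision)
print("recall per class:", recall)
```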
github_jupyter
# Spark on Kubernetes Preparing the notebook https://towardsdatascience.com/make-kubeflow-into-your-own-data-science-workspace-cc8162969e29 ## Setup service account permissions https://github.com/kubeflow/kubeflow/issues/4306 issue with launching spark-operator from jupyter notebook Run command in your shell (not in notebook) ```shell export NAMESPACE=<your_namespace> kubectl create serviceaccount spark -n ${NAMESPACE} kubectl create clusterrolebinding spark-role --clusterrole=edit --serviceaccount=${NAMESPACE}:spark --namespace=${NAMESPACE} ``` ## Python version > Note: Make sure your driver python and executor python version matches. > Otherwise, you will see error msg like below Exception: Python in worker has different version 3.7 than that in driver 3.6, PySpark cannot run with different minor versions.Please check environment variables `PYSPARK_PYTHON` and `PYSPARK_DRIVER_PYTHON` are correctly set. ``` import sys print(sys.version) ``` ## Client Mode ``` import findspark, pyspark,socket from pyspark import SparkContext, SparkConf from pyspark.sql import SparkSession findspark.init() localIpAddress = socket.gethostbyname(socket.gethostname()) conf = SparkConf().setAppName('sparktest1') conf.setMaster('k8s://https://kubernetes.default.svc:443') conf.set("spark.submit.deployMode", "client") conf.set("spark.executor.instances", "2") conf.set("spark.driver.host", localIpAddress) conf.set("spark.driver.port", "7778") conf.set("spark.kubernetes.namespace", "yahavb") conf.set("spark.kubernetes.container.image", "seedjeffwan/spark-py:v2.4.6") conf.set("spark.kubernetes.pyspark.pythonVersion", "3") conf.set("spark.kubernetes.authenticate.driver.serviceAccountName", "spark") conf.set("spark.kubernetes.executor.annotation.sidecar.istio.io/inject", "false") sc = pyspark.context.SparkContext.getOrCreate(conf=conf) # following works as well # spark = SparkSession.builder.config(conf=conf).getOrCreate() num_samples = 100000 def inside(p): x, y = random.random(), random.random() return x*x + y*y < 1 count = sc.parallelize(range(0, num_samples)).filter(inside).count() sc.stop() ``` ## Cluster Mode ## Java ``` %%bash /opt/spark-2.4.6/bin/spark-submit --master "k8s://https://kubernetes.default.svc:443" \ --deploy-mode cluster \ --name spark-java-pi \ --class org.apache.spark.examples.SparkPi \ --conf spark.executor.instances=30 \ --conf spark.kubernetes.namespace=yahavb \ --conf spark.kubernetes.driver.annotation.sidecar.istio.io/inject=false \ --conf spark.kubernetes.executor.annotation.sidecar.istio.io/inject=false \ --conf spark.kubernetes.container.image=seedjeffwan/spark:v2.4.6 \ --conf spark.kubernetes.driver.pod.name=spark-java-pi-driver \ --conf spark.kubernetes.executor.request.cores=4 \ --conf spark.kubernetes.node.selector.computetype=gpu \ --conf spark.kubernetes.authenticate.driver.serviceAccountName=spark \ local:///opt/spark/examples/jars/spark-examples_2.11-2.4.6.jar 262144 %%bash kubectl -n yahavb delete po ` kubectl -n yahavb get po | grep spark-java-pi-driver | awk '{print $1}'` ``` ## Python ``` %%bash /opt/spark-2.4.6/bin/spark-submit --master "k8s://https://kubernetes.default.svc:443" \ --deploy-mode cluster \ --name spark-python-pi \ --conf spark.executor.instances=50 \ --conf spark.kubernetes.container.image=seedjeffwan/spark-py:v2.4.6 \ --conf spark.kubernetes.driver.pod.name=spark-python-pi-driver \ --conf spark.kubernetes.namespace=yahavb \ --conf spark.kubernetes.driver.annotation.sidecar.istio.io/inject=false \ --conf 
spark.kubernetes.executor.annotation.sidecar.istio.io/inject=false \ --conf spark.kubernetes.pyspark.pythonVersion=3 \ --conf spark.kubernetes.executor.request.cores=4 \ --conf spark.kubernetes.authenticate.driver.serviceAccountName=spark /opt/spark/examples/src/main/python/pi.py 64000 %%bash kubectl -n yahavb delete po `kubectl -n yahavb get po | grep spark-python-pi-driver | awk '{print $1}'` ```
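One caveat on the client-mode pi example above: `inside()` calls `random.random()`, but that cell imports only `findspark`, `pyspark`, and `socket`, so the job will most likely fail with a `NameError` on the executors unless `random` is imported elsewhere in the session. For reference, here is a minimal, Spark-free sketch of the same Monte-Carlo estimate with the missing import included:

```
import random

def estimate_pi(num_samples=100_000):
    """Monte-Carlo estimate of pi: the fraction of random points in the
    unit square that land inside the quarter circle, multiplied by 4."""
    inside = sum(
        1 for _ in range(num_samples)
        if random.random() ** 2 + random.random() ** 2 < 1
    )
    return 4 * inside / num_samples

print(estimate_pi())
```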
github_jupyter
``` from Maze import Maze from sarsa_agent import SarsaAgent import numpy as np import matplotlib.pyplot as plt from matplotlib import animation from IPython.display import HTML ``` ## Designing the maze ``` arr=np.array([[0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0], [0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0], [0,1,0,0,1,0,0,1,1,1,1,1,0,1,1,0,1,1,1,0], [0,1,0,0,1,0,0,0,0,0,1,0,0,1,0,0,1,0,0,0], [0,0,0,0,1,0,0,1,1,1,0,0,1,1,0,0,1,0,0,0], [0,0,0,0,0,0,0,1,0,0,0,1,0,1,0,1,1,0,1,1], [1,1,1,0,1,1,0,1,0,0,1,0,0,1,0,0,1,0,0,0], [0,0,1,0,1,0,1,0,0,1,0,0,0,0,0,0,1,0,1,0], [0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,1,1,0,1,0], [0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,1,0,0,0,0], [1,0,1,1,1,0,1,0,0,1,0,0,0,1,0,0,0,1,0,0], [1,0,1,1,1,0,1,0,0,1,0,0,1,1,0,0,0,1,0,0], [1,0,0,1,0,0,1,0,0,1,0,0,1,0,0,1,0,0,0,0], [0,0,0,0,1,0,1,0,0,1,1,0,1,0,0,0,1,1,1,0], [0,0,1,1,1,0,1,0,0,1,0,1,0,0,1,1,0,0,0,0], [0,1,1,0,0,0,0,1,0,1,0,0,1,1,0,1,0,1,1,1], [0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0], [0,0,1,1,1,0,1,1,0,0,1,0,1,0,0,1,1,0,0,0], [1,0,0,0,1,0,1,0,0,0,1,0,1,0,0,1,1,1,0,0], [1,1,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,1,0,0] ],dtype=float) #Position of the rat rat=(0,0) #If Cheese is None, cheese is placed in the bottom-right cell of the maze cheese=None #The maze object takes the maze maze=Maze(arr,rat,cheese) maze.show_maze() ``` ## Defining a Agent [Sarsa Agent because it uses Sarsa to solve the maze] ``` agent=SarsaAgent(maze) ``` ## Making the agent play episodes and learn ``` agent.learn(episodes=1000) ``` ## Plotting the maze ``` nrow=maze.nrow ncol=maze.ncol fig=plt.figure() ax=fig.gca() ax.set_xticks(np.arange(0.5,ncol,1)) ax.set_yticks(np.arange(0.5,nrow,1)) ax.set_xticklabels([]) ax.set_yticklabels([]) ax.grid('on') img=ax.imshow(maze.maze,cmap="gray",) a=5 ``` ## Making Animation of the maze solution ``` def gen_func(): maze=Maze(arr,rat,cheese) done=False while not done: row,col,_=maze.state cell=(row,col) action=agent.get_policy(cell) maze.step(action) done=maze.get_status() yield maze.get_canvas() def update_plot(canvas): img.set_data(canvas) anim=animation.FuncAnimation(fig,update_plot,gen_func) HTML(anim.to_html5_video()) anim.save("big_maze.gif",animation.PillowWriter()) ```
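For readers curious about what an agent like `SarsaAgent` does under the hood, the core of tabular SARSA is a one-line temporal-difference update. The sketch below is a generic illustration of that update rule, not the actual code in the `sarsa_agent` module:

```
from collections import defaultdict

# Q-table keyed by state (e.g. a (row, col) maze cell) and then by action index
Q = defaultdict(lambda: defaultdict(float))

def sarsa_update(Q, s, a, r, s_next, a_next, alpha=0.1, gamma=0.99):
    """One on-policy TD step: Q(s,a) += alpha * (r + gamma * Q(s',a') - Q(s,a))."""
    td_target = r + gamma * Q[s_next][a_next]
    Q[s][a] += alpha * (td_target - Q[s][a])

# example usage with made-up transition values:
# sarsa_update(Q, (0, 0), 1, -1.0, (0, 1), 2)
```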
github_jupyter
## VAE MNIST example: BO in a latent space In this tutorial, we use the MNIST dataset and some standard PyTorch examples to show a synthetic problem where the input to the objective function is a `28 x 28` image. The main idea is to train a [variational auto-encoder (VAE)](https://arxiv.org/abs/1312.6114) on the MNIST dataset and run Bayesian Optimization in the latent space. We also refer readers to [this tutorial](http://krasserm.github.io/2018/04/07/latent-space-optimization/), which discusses [the method](https://arxiv.org/abs/1610.02415) of jointly training a VAE with a predictor (e.g., classifier), and shows a similar tutorial for the MNIST setting. ``` import os import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torchvision import datasets # transforms device = torch.device("cuda" if torch.cuda.is_available() else "cpu") dtype = torch.float ``` ### Problem setup Let's first define our synthetic expensive-to-evaluate objective function. We assume that it takes the following form: $$\text{image} \longrightarrow \text{image classifier} \longrightarrow \text{scoring function} \longrightarrow \text{score}.$$ The classifier is a convolutional neural network (CNN) trained using the architecture of the [PyTorch CNN example](https://github.com/pytorch/examples/tree/master/mnist). ``` class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(1, 20, 5, 1) self.conv2 = nn.Conv2d(20, 50, 5, 1) self.fc1 = nn.Linear(4 * 4 * 50, 500) self.fc2 = nn.Linear(500, 10) def forward(self, x): x = F.relu(self.conv1(x)) x = F.max_pool2d(x, 2, 2) x = F.relu(self.conv2(x)) x = F.max_pool2d(x, 2, 2) x = x.view(-1, 4*4*50) x = F.relu(self.fc1(x)) x = self.fc2(x) return F.log_softmax(x, dim=1) ``` We next instantiate the CNN for digit recognition and load a pre-trained model. Here, you may have to change `PRETRAINED_LOCATION` to the location of the `pretrained_models` folder on your machine. ``` PRETRAINED_LOCATION = "./pretrained_models" cnn_model = Net().to(device) cnn_state_dict = torch.load(os.path.join(PRETRAINED_LOCATION, "mnist_cnn.pt"), map_location=device) cnn_model.load_state_dict(cnn_state_dict); ``` Our VAE model follows the [PyTorch VAE example](https://github.com/pytorch/examples/tree/master/vae), except that we use the same data transform from the CNN tutorial for consistency. We then instantiate the model and again load a pre-trained model. To train these models, we refer readers to the PyTorch Github repository. ``` class VAE(nn.Module): def __init__(self): super().__init__() self.fc1 = nn.Linear(784, 400) self.fc21 = nn.Linear(400, 20) self.fc22 = nn.Linear(400, 20) self.fc3 = nn.Linear(20, 400) self.fc4 = nn.Linear(400, 784) def encode(self, x): h1 = F.relu(self.fc1(x)) return self.fc21(h1), self.fc22(h1) def reparameterize(self, mu, logvar): std = torch.exp(0.5*logvar) eps = torch.randn_like(std) return mu + eps*std def decode(self, z): h3 = F.relu(self.fc3(z)) return torch.sigmoid(self.fc4(h3)) def forward(self, x): mu, logvar = self.encode(x.view(-1, 784)) z = self.reparameterize(mu, logvar) return self.decode(z), mu, logvar vae_model = VAE().to(device) vae_state_dict = torch.load(os.path.join(PRETRAINED_LOCATION, "mnist_vae.pt"), map_location=device) vae_model.load_state_dict(vae_state_dict); ``` We now define the scoring function that maps digits to scores. The function below prefers the digit '3'. ``` def score(y): """Returns a 'score' for each digit from 0 to 9. 
It is modeled as a squared exponential centered at the digit '3'. """ return torch.exp(-2 * (y - 3)**2) ``` Given the scoring function, we can now write our overall objective, which as discussed above, starts with an image and outputs a score. Let's say the objective computes the expected score given the probabilities from the classifier. ``` def score_image_recognition(x): """The input x is an image and an expected score based on the CNN classifier and the scoring function is returned. """ with torch.no_grad(): probs = torch.exp(cnn_model(x)) # b x 10 scores = score(torch.arange(10, device=device, dtype=dtype)).expand(probs.shape) return (probs * scores).sum(dim=1) ``` Finally, we define a helper function `decode` that takes as input the parameters `mu` and `logvar` of the variational distribution and performs reparameterization and the decoding. We use batched Bayesian optimization to search over the parameters `mu` and `logvar` ``` def decode(train_x): with torch.no_grad(): decoded = vae_model.decode(train_x) return decoded.view(train_x.shape[0], 1, 28, 28) ``` #### Model initialization and initial random batch We use a `SingleTaskGP` to model the score of an image generated by a latent representation. The model is initialized with points drawn from $[-6, 6]^{20}$. ``` from botorch.models import SingleTaskGP from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood bounds = torch.tensor([[-6.0] * 20, [6.0] * 20], device=device, dtype=dtype) def initialize_model(n=5): # generate training data train_x = (bounds[1] - bounds[0]) * torch.rand(n, 20, device=device, dtype=dtype) + bounds[0] train_obj = score_image_recognition(decode(train_x)) best_observed_value = train_obj.max().item() # define models for objective and constraint model = SingleTaskGP(train_X=train_x, train_Y=train_obj) model = model.to(train_x) mll = ExactMarginalLogLikelihood(model.likelihood, model) mll = mll.to(train_x) return train_x, train_obj, mll, model, best_observed_value ``` #### Define a helper function that performs the essential BO step The helper function below takes an acquisition function as an argument, optimizes it, and returns the batch $\{x_1, x_2, \ldots x_q\}$ along with the observed function values. For this example, we'll use a small batch of $q=3$. ``` from botorch.optim import joint_optimize BATCH_SIZE = 3 def optimize_acqf_and_get_observation(acq_func): """Optimizes the acquisition function, and returns a new candidate and a noisy observation""" # optimize candidates = joint_optimize( acq_function=acq_func, bounds=bounds, q=BATCH_SIZE, num_restarts=10, raw_samples=200, ) # observe new values new_x = candidates.detach() new_obj = score_image_recognition(decode(new_x)) return new_x, new_obj ``` ### Perform Bayesian Optimization loop with qEI The Bayesian optimization "loop" for a batch size of $q$ simply iterates the following steps: (1) given a surrogate model, choose a batch of points $\{x_1, x_2, \ldots x_q\}$, (2) observe $f(x)$ for each $x$ in the batch, and (3) update the surrogate model. We run `N_BATCH=75` iterations. The acquisition function is approximated using `MC_SAMPLES=2000` samples. We also initialize the model with 5 randomly drawn points. 
``` from botorch import fit_gpytorch_model from botorch.acquisition.monte_carlo import qExpectedImprovement from botorch.acquisition.sampler import SobolQMCNormalSampler seed=1 torch.manual_seed(seed) N_BATCH = 50 MC_SAMPLES = 2000 best_observed = [] # call helper function to initialize model train_x, train_obj, mll, model, best_value = initialize_model(n=5) best_observed.append(best_value) ``` We are now ready to run the BO loop (this make take a few minutes, depending on your machine). ``` import warnings warnings.filterwarnings("ignore") print(f"\nRunning BO ", end='') from matplotlib import pyplot as plt # run N_BATCH rounds of BayesOpt after the initial random batch for iteration in range(N_BATCH): # fit the model fit_gpytorch_model(mll) # define the qNEI acquisition module using a QMC sampler qmc_sampler = SobolQMCNormalSampler(num_samples=MC_SAMPLES, seed=seed) qEI = qExpectedImprovement(model=model, sampler=qmc_sampler, best_f=best_value) # optimize and get new observation new_x, new_obj = optimize_acqf_and_get_observation(qEI) # update training points train_x = torch.cat((train_x, new_x)) train_obj = torch.cat((train_obj, new_obj)) # update progress best_value = score_image_recognition(decode(train_x)).max().item() best_observed.append(best_value) # reinitialize the model so it is ready for fitting on next iteration model.set_train_data(train_x, train_obj, strict=False) print(".", end='') ``` EI recommends the best point observed so far. We can visualize what the images corresponding to recommended points *would have* been if the BO process ended at various times. Here, we show the progress of the algorithm by examining the images at 0%, 10%, 25%, 50%, 75%, and 100% completion. The first image is the best image found through the initial random batch. ``` import numpy as np from matplotlib import pyplot as plt %matplotlib inline fig, ax = plt.subplots(1, 6, figsize=(14, 14)) percentages = np.array([0, 10, 25, 50, 75, 100], dtype=np.float32) inds = (N_BATCH * BATCH_SIZE * percentages / 100 + 4).astype(int) for i, ax in enumerate(ax.flat): b = torch.argmax(score_image_recognition(decode(train_x[:inds[i],:])), dim=0) img = decode(train_x[b].view(1, -1)).squeeze().cpu() ax.imshow(img, alpha=0.8, cmap='gray') ```
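If all you need afterwards is the single best image rather than the progress panels, the best latent point found can be pulled out directly. A short sketch, reusing `train_x`, `decode`, and `score_image_recognition` from above:

```
# index of the highest-scoring latent point evaluated so far
best_idx = torch.argmax(score_image_recognition(decode(train_x)))
best_img = decode(train_x[best_idx].view(1, -1)).squeeze().cpu()

plt.imshow(best_img, cmap='gray')
plt.title('best image found')
plt.show()
```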
github_jupyter
``` import numpy as np import matplotlib.pyplot as plt import pandas as pd import matplotlib %matplotlib inline matplotlib.rcParams['figure.figsize'] = (12, 8) # set default figure size, 8in by 6in ``` # Ensemble Learning Sometimes aggregrates or ensembles of many different opinions on a question can perform as well or better than asking a single expert on the same question. This is known as the *wisdom of the crowd* when the aggregrate opinion of people on a question performs as well or better as a single isolated expert in predicting some outcome. Likewise, for machine learning predictors, a similar effect can also often occur. The aggregrate performance of multiple predictors and often make a small but significant improvement on building a classifier or regression predictor for a complex set of data. A group of machine learning predictors is called an *ensemble*, and thus this technique of combining the predictions of an ensemble is known as *Ensemble Learning* . For exampl,e we could train a group of Decision Tree classifiers, each on a different random subset of the training data. To make an ensemble prediciton, you just obtain the predictions of all individual trees, then predict the class that gets the most votes. Such an ensemble of Decision Trees is called a *Random Forest*, and despite the relative simplicity of decision tree predictors, it can be surprisingly powerful as a ML predictor. # Voting Classifiers Say you have several classifiers for the same classification problem (say a Logistic Classifier, and SVM, a Decision Tree and a KNN classifier and perhaps a few more). The simplest way to create an ensemble classifier is to aggregrate the predictions of each classifier and predict the class that gets the most votes. This majority-vote classifier is called a *hard voting* classifier. Somewhat surprisingly, this voting classifier often achieves a higher accuracy than the best classifier in the ensemble. In fact, even if each classifier is a *weak learner* (meaning it only does slightly better than random guessing), the ensemble can still be a *strong learner* (achieving high accuracy). The key to making good ensemble predictors is that you need both a sufficient number of learners (even of weak learners), but also maybe more importantly, the learners need to be "sufficiently diverse", where diverse is a bit fuzzy to define, but in general the classifiers must be as independent as possible, so that even if they are weak predictors, they are weak in different and diverse ways. ``` def flip_unfair_coin(num_flips, head_ratio): """Simulate flipping an unbalanced coin. We return a numpy array of size num_flips, with 0 to represent 1 to represent a head and 0 a tail flip. We generate a head or tail result using the head_ratio probability threshold drawn from a standard uniform distribution. 
""" # array of correct size to hold resulting simulated flips flips = np.empty(num_flips) # flip the coin the number of indicated times for flip in range(num_flips): flips[flip] = np.random.random() < head_ratio # return the resulting coin flip trials trials return flips def running_heads_ratio(flips): """Given a sequence of flips, where 1 represents a "Head" and 0 a "Tail" flip, return an array of the running ratio of heads / tails """ # array of correct size to hold resulting heads ratio seen at each point in the flips sequence num_flips = flips.shape[0] head_ratios = np.empty(num_flips) # keep track of number of heads seen so far, the ratio is num_heads / num_flips num_heads = 0.0 # calculate ratio for each flips instance in the sequence for flip in range(num_flips): num_heads += flips[flip] head_ratios[flip] = num_heads / (flip + 1) # return the resulting sequence of head ratios seen in the flips return head_ratios NUM_FLIPPERS = 10 NUM_FLIPS = 10000 HEAD_PERCENT = 0.51 # create 3 separate sequences of flippers flippers = np.empty( (NUM_FLIPPERS, NUM_FLIPS) ) for flipper in range(NUM_FLIPPERS): flips = flip_unfair_coin(NUM_FLIPS, HEAD_PERCENT) head_ratios = running_heads_ratio(flips) flippers[flipper] = head_ratios # create an ensemble, in this case we will average the individual flippers ensemble = flippers.mean(axis=0) # plot the resulting head ratio for our flippers flips = np.arange(1, NUM_FLIPS+1) for flipper in range(NUM_FLIPPERS): plt.plot(flips, flippers[flipper], alpha=0.25) plt.plot(flips, ensemble, 'b-', alpha=1.0, label='ensemble decision') plt.ylim([0.42, 0.58]) plt.plot([1, NUM_FLIPS], [HEAD_PERCENT, HEAD_PERCENT], 'k--', label='51 %') plt.plot([1, NUM_FLIPS], [0.5, 0.5], 'k-', label='50 %') plt.xlabel('Number of coin tosses') plt.ylabel('Heads ratio') plt.legend(); ``` ## Scikit-Learn Voting Classifier The following code is an example of creating a voting classifier in Scikit-Learn. We are using the moons dataset shown. Here we create 3 separate classifiers by hand, a logistic regressor, a decision tree, and a support vector classifier (SVC). Notice we specify 'hard' voting for the voting classifier, which as we discussed is the simple method of choosing the class with the most votes. (This is a binary classification so 2 out of 3 or 3 out of 3 are the only possibilities. For a multiclass classification, in case of a tie vote, the voting classifier may fall back to the probability scores the classifiers give, assuming the provide probability/confidence measures of their prediction). ``` # helper functions to visualize decision boundaries for 2-feature classification tasks # create a scatter plot of the artificial multiclass dataset from matplotlib import cm # visualize the blobs using matplotlib. An example of a funciton we can reuse, since later # we want to plot the decision boundaries along with the scatter plot data def plot_multiclass_data(X, y): """Create a scatter plot of a set of multiclass data. We assume that X has 2 features so that we can plot on a 2D grid, and that y are integer labels [0,1,2,...] with a unique integer label for each class of the dataset. 
Parameters ---------- X - A (m,2) shaped number array of m samples each with 2 features y - A (m,) shaped vector of integers with the labeled classes of each of the X input features """ # hardcoded to handle only up to 8 classes markers = ['o', '^', 's', 'd', '*', 'p', 'P', 'v'] #colors = ['r', 'g', 'b', 'c', 'm', 'y', 'k'] # determine number of features in the data m = X.shape[0] # determine the class labels labels = np.unique(y) #colors = cm.rainbow(np.linspace(0.0, 1.0, labels.size)) colors = cm.Set1.colors # loop to plot each for label, marker, color in zip(labels, markers, colors): X_label = X[y == label] y_label = y[y == label] label_text = 'Class %s' % label plt.plot(X_label[:,0], X_label[:,1], marker=marker, markersize=8.0, markeredgecolor='k', color=color, alpha=0.5, linestyle='', label=label_text) plt.xlabel('Feature 1') plt.ylabel('Feature 2') plt.legend(); def plot_multiclass_decision_boundaries(model, X, y): from matplotlib.colors import ListedColormap """Use a mesh/grid to create a contour plot that will show the decision boundaries reached by a trained scikit-learn classifier. We expect that the model passed in is a trained scikit-learn classifier that supports/implements a predict() method, that will return predictions for the given set of X data. Parameters ---------- model - A trained scikit-learn classifier that supports prediction using a predict() method X - A (m,2) shaped number array of m samples each with 2 features """ # determine the class labels labels = np.unique(y) #colors = cm.rainbow(np.linspace(0.0, 1.0, labels.size)) #colors = cm.Set1.colors newcmp = ListedColormap(plt.cm.Set1.colors[:len(labels)]) # create the mesh of points to use for the contour plot h = .02 # step size in the mesh x_min, x_max = X[:, 0].min() - h, X[:, 0].max() + h y_min, y_max = X[:, 1].min() - h, X[:, 1].max() + h xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) # create the predictions over the mesh using the trained models predict() function Z = model.predict(np.c_[xx.ravel(), yy.ravel()]) # Create the actual contour plot, which will show the decision boundaries Z = Z.reshape(xx.shape) plt.contourf(xx, yy, Z, cmap=newcmp, alpha=0.33) #plt.colorbar() from sklearn.datasets import make_moons X, y = make_moons(n_samples=2500, noise=0.3) # we will split data using a 75%/25% train/test split this time from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42) from sklearn.ensemble import VotingClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from sklearn.svm import SVC log_clf = LogisticRegression(solver='lbfgs', C=5.0) tree_clf = DecisionTreeClassifier(max_depth=10) svm_clf = SVC(gamma=100.0, C=1.0) voting_clf = VotingClassifier( estimators=[('lr', log_clf), ('tree', tree_clf), ('svc', svm_clf)], voting='hard' ) voting_clf.fit(X_train, y_train) plot_multiclass_decision_boundaries(voting_clf, X, y) plot_multiclass_data(X, y) ``` Lets look at each classifier's accuracy on the test set, including for the ensemble voting classifier: ``` from sklearn.metrics import accuracy_score for clf in (log_clf, tree_clf, svm_clf, voting_clf): clf.fit(X_train, y_train) y_pred = clf.predict(X_test) print(clf.__class__.__name__, accuracy_score(y_test, y_pred)) ``` The voting classifier will usually outperform all the individual classifier, if the data is sufficiently nonseparable to 
make it relatively hard (e.g. with less random noise in the moons data set, you can get real good performance sometimes with random forest and/or svc, which will exceed the voting classifier). If all classifiers are able to estimate class probabilities (i.e. in `scikit-learn` they support `predict_proba()` method), then you can tell `scikit-learn` to predict the class with the highest class probability, averaged over all individual classifiers. You can think of this as each classifier having its vote weighted by its confidence of the prediction. This is called *soft voting*. It often achieves higher performance than hard voting because it gives more weight to highly confident votes. All you need to do is replace `voting='hard'` with `voting='soft'` and ensure that all classifiers can estimate clas sprobabilities. If you recall, support vector machine classifiers (`SVC`) do not estimate class probabilities by default, but if you set `SVC` `probability` hyperparameter to `True`, the `SVC` class will use cross-validation to estimate class probabilities. This slows training, but it makes the `predict_proba()` method valid for `SVC`, and since both logistic regression and random forests support this confidence estimate, we can then use soft voting for the voting classifier. ``` log_clf = LogisticRegression(solver='lbfgs', C=5.0) tree_clf = DecisionTreeClassifier(max_depth=8) svm_clf = SVC(gamma=1000.0, C=1.0, probability=True) # enable probability estimates for svm classifier voting_clf = VotingClassifier( estimators=[('lr', log_clf), ('tree', tree_clf), ('svc', svm_clf)], voting='soft' # use soft voting this time ) voting_clf.fit(X_train, y_train) plot_multiclass_decision_boundaries(voting_clf, X, y) plot_multiclass_data(X, y) from sklearn.metrics import accuracy_score for clf in (log_clf, tree_clf, svm_clf, voting_clf): clf.fit(X_train, y_train) y_pred = clf.predict(X_test) print(clf.__class__.__name__, accuracy_score(y_test, y_pred)) ``` # Bagging and Pasting One way to get a diverse set of classifiers is to use very different training algorithms. The previous voting classifier was an example of this, where we used 3 very different kinds of classifiers for the voting ensemble. Another approach is to use the same training for every predictor, but to train them on different random subsets of the training set. When sampling is performed with replacement, this method is called *bagging* (short for *bootstrap aggregrating*). When sampling is performed without replacement, it is called *pasting*. In other words, both approaches are similar. In both cases you are sampling the training data to build multiple instances of a classifier. In both cases a training item could be sampled and used to train multiple instances in the collection of classifiers that is produced. In bagging, it is possible for a training sample to be sampled multiple times in the training for the same predictor. This type of bootstrap aggregration is a type of data enhancement, and it is used in other contexts as well in ML to artificially increase the size of the training set. Once all predictors are trained, the ensemble can make predictions for a new instance by simply aggregating the predictions of all the predictors. The aggregration function is typically the *statistical mode* (i.e. the most frequent prediction, just like hard voting) for classification, or the average for regression. 
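The only difference between the two sampling schemes is whether sampling uses replacement; a tiny NumPy sketch makes that concrete:

```
import numpy as np

rng = np.random.default_rng(42)
indices = np.arange(10)   # pretend these index 10 training instances

# bagging: sample with replacement, so the same instance can appear several times
bag_sample = rng.choice(indices, size=10, replace=True)

# pasting: sample without replacement, so each instance is used at most once
paste_sample = rng.choice(indices, size=7, replace=False)

print(bag_sample)
print(paste_sample)
```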
Each individual predictor has a higher bias than if it were trained on the original training set (because it does not see all of the training data), but the aggregation usually reduces both bias and variance in the final performance. Generally the net result is that the ensemble has a similar bias but a lower variance than a single predictor trained on the whole original training set. Computationally, bagging and pasting are very attractive because, in theory and in practice, all of the classifiers can be trained in parallel. Thus if you have a large number of CPU cores, or even a distributed-memory computing cluster, you can train the individual classifiers independently and in parallel.

## Scikit-Learn Bagging and Pasting Examples

The ensemble API in `scikit-learn` for performing bagging and/or pasting is relatively simple. As with the voting classifier, we specify which type of classifier we want to use, but since bagging/pasting trains multiple classifiers of the same type, we only have to specify one. The `n_jobs` parameter tells `scikit-learn` the number of CPU cores to use for training and predictions (-1 tells `scikit-learn` to use all available cores). The following trains an ensemble of 500 decision tree classifiers (`n_estimators`), each trained on 100 training instances randomly sampled from the training set with replacement (`bootstrap=True`). If you want to use pasting instead, simply set `bootstrap=False`.

**NOTE**: The `BaggingClassifier` automatically performs soft voting instead of hard voting if the base classifier can estimate class probabilities (i.e. it has a `predict_proba()` method).

```
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier

bag_clf = BaggingClassifier(
    DecisionTreeClassifier(max_leaf_nodes=20),
    n_estimators=500, max_samples=100, bootstrap=True, n_jobs=-1
)
bag_clf.fit(X_train, y_train)
y_pred = bag_clf.predict(X_test)
print(bag_clf.__class__.__name__, accuracy_score(y_test, y_pred))

plot_multiclass_decision_boundaries(bag_clf, X, y)
plot_multiclass_data(X, y)
```

## Out-of-Bag Evaluation

With bagging, some instances may be sampled several times for any given predictor, while others may not be sampled at all. By default a `BaggingClassifier` samples `m` training instances with replacement, where `m` is the size of the training set. This means that only about 63% of the training instances are sampled on average for each predictor. The remaining 37% of the training instances that are not sampled are called *out-of-bag* (oob) instances. **NOTE**: they are not the same 37% for every predictor; each predictor has its own oob set.

Since a predictor never sees its oob instances during training, it can be evaluated on these instances without the need for a separate validation set or cross-validation. You can evaluate the ensemble itself by averaging the oob evaluations of the individual predictors. In `scikit-learn` you can set `oob_score=True` when creating a `BaggingClassifier` to request an automatic oob evaluation after training:

```
bag_clf = BaggingClassifier(
    DecisionTreeClassifier(),
    n_estimators=500, bootstrap=True, n_jobs=-1, oob_score=True
)
bag_clf.fit(X_train, y_train)
print(bag_clf.oob_score_)

y_pred = bag_clf.predict(X_test)
print(accuracy_score(y_test, y_pred))
```

The oob decision function for each training instance is also available through the `oob_decision_function_` variable.
```
bag_clf.oob_decision_function_
```

## Random Patches and Random Subspaces

The default behavior of the bagging/pasting classifier is to sample only the training instances. However, it can also be useful to build classifiers that use only some of the features of the input data. We have looked at methods for adding features, for example by adding polynomial combinations of the feature inputs. But for big data we might have thousands or even millions of input features, and it can very well be that some or many of them are not really useful, or are even somewhat harmful, to building a truly good and general classifier. So one approach when we have a large number of features is to build multiple classifiers (using bagging/pasting) on sampled subsets of the features.

In the `scikit-learn` `BaggingClassifier` this is controlled by two hyperparameters: `max_features` and `bootstrap_features`. They work the same way as `max_samples` and `bootstrap`, but for feature sampling instead of instance sampling. Thus each predictor will be trained on a random subset of the input features, which is particularly useful when dealing with high-dimensional inputs. Sampling both training instances and features simultaneously is called the *Random Patches* method. Keeping all training instances but sampling features is called the *Random Subspaces* method.

# Random Forests

As we have already mentioned, a random forest is simply an ensemble of decision trees, generally trained via the bagging method, typically with `max_samples` set to the size of the training set. We could create a random forest by hand using `scikit-learn`'s `BaggingClassifier` on a `DecisionTreeClassifier`, which is in fact what we just did in the previous section: our previous ensemble was an example of a random forest classifier. But in `scikit-learn`, instead of building the ensemble somewhat by hand, you can use the `RandomForestClassifier` class, which is more convenient and has default hyperparameter settings optimized for random forests. The following code trains a random forest classifier with 500 trees (each limited to a maximum of 16 leaf nodes), using all available CPU cores:

```
from sklearn.ensemble import RandomForestClassifier

rnd_clf = RandomForestClassifier(n_estimators=500, max_leaf_nodes=16, n_jobs=-1)
rnd_clf.fit(X_train, y_train)

y_pred = rnd_clf.predict(X_test)
print(accuracy_score(y_test, y_pred))
```

A random forest classifier has all of the hyperparameters of a `DecisionTreeClassifier` (to control how trees are grown), plus all of the hyperparameters of a `BaggingClassifier` to control the ensemble itself. The random forest algorithm introduces extra randomness when growing trees: instead of searching for the very best feature when splitting a node, it searches for the best feature among a random subset of features. This results in greater tree diversity, which trades a higher bias for a lower variance, generally yielding a better overall ensemble model. The following `BaggingClassifier` is roughly equivalent to the previous `RandomForestClassifier`:

```
bag_clf = BaggingClassifier(
    DecisionTreeClassifier(splitter='random', max_leaf_nodes=16),
    n_estimators=500, max_samples=1.0, bootstrap=True, n_jobs=-1
)
bag_clf.fit(X_train, y_train)

y_pred = bag_clf.predict(X_test)
print(accuracy_score(y_test, y_pred))
```

## Extra-Trees

When growing a tree in a random forest, only a random subset of features is considered for splitting at each node, as we just discussed.
It is possible to make trees even more random by also using random thresholds for each feature, rather than searching for the best possible thresholds. A forest of such extremely random trees is called an *Extremely Randomized Trees* ensemble (or *Extra-Trees* for short). You can create an extra-trees classifier using `scikit-learn`'s `ExtraTreesClassifier` class; its API is identical to the `RandomForestClassifier` class.

**TIP:** It is hard to tell in advance whether a random forest or an extra-trees ensemble will perform better or worse on a given set of data. Generally the only way to know is to try both and compare them using cross-validation.

## Feature Importance

Lastly, if you look at a single decision tree, important features are likely to appear closer to the root of the tree, while unimportant features will often appear closer to the leaves (or not at all). Therefore another use of random forests is to estimate the importance of the features when making classification predictions. We can get an estimate of a feature's importance by computing the average depth at which it appears across all trees in a random forest. `scikit-learn` computes this automatically for every feature after training, and you can access the result using the `feature_importances_` variable. For example, if we build a `RandomForestClassifier` on the iris data set (with 4 features), we can output each feature's estimated importance.

```
from sklearn.datasets import load_iris

iris = load_iris()
rnd_clf = RandomForestClassifier(n_estimators=500, n_jobs=-1)
rnd_clf.fit(iris['data'], iris['target'])

for name, score in zip(iris['feature_names'], rnd_clf.feature_importances_):
    print(name, score)
```

It seems the most important feature is petal length, followed closely by petal width. Sepal length and especially sepal width are relatively less important.

# Boosting

*Boosting* (originally called *hypothesis boosting*) refers to any ensemble method that can combine several weak learners into a strong learner. Unlike the ensembles we looked at before, the general idea is to train predictors sequentially, each trying to correct its predecessor. There are many boosting methods, the most popular being *AdaBoost* (short for *Adaptive Boosting*) and *Gradient Boosting*.

## AdaBoost

## Gradient Boosting

# Stacking

Stacking works similarly to the voting ensembles we have looked at: multiple independent classifiers are trained in parallel and aggregated. But instead of using a trivial aggregation method (like hard voting), we train yet another model to perform the aggregation. This final model (called a *blender* or *meta learner*) takes the other trained predictors' outputs as input and makes a final prediction from them. `scikit-learn` does not support stacking directly (unlike voting ensembles and boosting), but it is not too difficult to hand roll a basic implementation of stacking from the `scikit-learn` APIs.

```
import sys
sys.path.append("../../src")  # add our class modules to the system PYTHON_PATH

from ml_python_class.custom_funcs import version_information
version_information()
```
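Picking up the Stacking section above: one simple way to hand roll a blender from estimators already used in this notebook is to train the base models on one half of the training data and train the blender on their predictions for the other half. The sketch below reuses the moons `X_train`/`X_test` split from earlier and is only one possible layout, not a full stacking implementation:

```
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score

# hold out part of the training data to train the blender on
X_base, X_blend, y_base, y_blend = train_test_split(
    X_train, y_train, test_size=0.5, random_state=42)

base_models = [
    LogisticRegression(solver='lbfgs'),
    DecisionTreeClassifier(max_depth=10),
    SVC(gamma=1.0, probability=True),
]
for model in base_models:
    model.fit(X_base, y_base)

def base_predictions(X):
    """Stack each base model's class-1 probability into a feature matrix
    (this is a binary problem, so one column per base model is enough)."""
    return np.column_stack([m.predict_proba(X)[:, 1] for m in base_models])

# the blender (meta learner) is trained on the base models' held-out predictions
blender = LogisticRegression(solver='lbfgs')
blender.fit(base_predictions(X_blend), y_blend)

y_pred = blender.predict(base_predictions(X_test))
print('stacking accuracy:', accuracy_score(y_test, y_pred))
```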
github_jupyter
<a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W1D2_ModelingPractice/student/W1D2_Tutorial2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Neuromatch Academy: Week1, Day 2, Tutorial 2 #Tutorial objectives We are investigating a simple phenomena, working through the 10 steps of modeling ([Blohm et al., 2019](https://doi.org/10.1523/ENEURO.0352-19.2019)) in two notebooks: **Framing the question** 1. finding a phenomenon and a question to ask about it 2. understanding the state of the art 3. determining the basic ingredients 4. formulating specific, mathematically defined hypotheses **Implementing the model** 5. selecting the toolkit 6. planning the model 7. implementing the model **Model testing** 8. completing the model 9. testing and evaluating the model **Publishing** 10. publishing models We did steps 1-5 in Tutorial 1 and will cover steps 6-10 in Tutorial 2 (this notebook). # Utilities Setup and Convenience Functions Please run the following **3** chunks to have functions and data available. ``` #@title Utilities and setup # set up the environment for this tutorial import time # import time import numpy as np # import numpy import scipy as sp # import scipy from scipy.stats import gamma # import gamma distribution import math # import basic math functions import random # import basic random number generator functions import matplotlib.pyplot as plt # import matplotlib from IPython import display fig_w, fig_h = (12, 8) plt.rcParams.update({'figure.figsize': (fig_w, fig_h)}) plt.style.use('ggplot') %matplotlib inline #%config InlineBackend.figure_format = 'retina' from scipy.signal import medfilt # make #@title Convenience functions: Plotting and Filtering # define some convenience functions to be used later def my_moving_window(x, window=3, FUN=np.mean): ''' Calculates a moving estimate for a signal Args: x (numpy.ndarray): a vector array of size N window (int): size of the window, must be a positive integer FUN (function): the function to apply to the samples in the window Returns: (numpy.ndarray): a vector array of size N, containing the moving average of x, calculated with a window of size window There are smarter and faster solutions (e.g. using convolution) but this function shows what the output really means. This function skips NaNs, and should not be susceptible to edge effects: it will simply use all the available samples, which means that close to the edges of the signal or close to NaNs, the output will just be based on fewer samples. By default, this function will apply a mean to the samples in the window, but this can be changed to be a max/min/median or other function that returns a single numeric value based on a sequence of values. 
''' # if data is a matrix, apply filter to each row: if len(x.shape) == 2: output = np.zeros(x.shape) for rown in range(x.shape[0]): output[rown,:] = my_moving_window(x[rown,:],window=window,FUN=FUN) return output # make output array of the same size as x: output = np.zeros(x.size) # loop through the signal in x for samp_i in range(x.size): values = [] # loop through the window: for wind_i in range(int(-window), 1): if ((samp_i+wind_i) < 0) or (samp_i+wind_i) > (x.size - 1): # out of range continue # sample is in range and not nan, use it: if not(np.isnan(x[samp_i+wind_i])): values += [x[samp_i+wind_i]] # calculate the mean in the window for this point in the output: output[samp_i] = FUN(values) return output def my_plot_percepts(datasets=None, plotconditions=False): if isinstance(datasets,dict): # try to plot the datasets # they should be named... # 'expectations', 'judgments', 'predictions' fig = plt.figure(figsize=(8, 8)) # set aspect ratio = 1? not really plt.ylabel('perceived self motion [m/s]') plt.xlabel('perceived world motion [m/s]') plt.title('perceived velocities') # loop through the entries in datasets # plot them in the appropriate way for k in datasets.keys(): if k == 'expectations': expect = datasets[k] plt.scatter(expect['world'],expect['self'],marker='*',color='xkcd:green',label='my expectations') elif k == 'judgments': judgments = datasets[k] for condition in np.unique(judgments[:,0]): c_idx = np.where(judgments[:,0] == condition)[0] cond_self_motion = judgments[c_idx[0],1] cond_world_motion = judgments[c_idx[0],2] if cond_world_motion == -1 and cond_self_motion == 0: c_label = 'world-motion condition judgments' elif cond_world_motion == 0 and cond_self_motion == 1: c_label = 'self-motion condition judgments' else: c_label = 'condition [%d] judgments'%condition plt.scatter(judgments[c_idx,3],judgments[c_idx,4], label=c_label, alpha=0.2) elif k == 'predictions': predictions = datasets[k] for condition in np.unique(predictions[:,0]): c_idx = np.where(predictions[:,0] == condition)[0] cond_self_motion = predictions[c_idx[0],1] cond_world_motion = predictions[c_idx[0],2] if cond_world_motion == -1 and cond_self_motion == 0: c_label = 'predicted world-motion condition' elif cond_world_motion == 0 and cond_self_motion == 1: c_label = 'predicted self-motion condition' else: c_label = 'condition [%d] prediction'%condition plt.scatter(predictions[c_idx,4],predictions[c_idx,3], marker='x', label=c_label) else: print("datasets keys should be 'hypothesis', 'judgments' and 'predictions'") if plotconditions: # this code is simplified but only works for the dataset we have: plt.scatter([1],[0],marker='<',facecolor='none',edgecolor='xkcd:black',linewidths=2,label='world-motion stimulus',s=80) plt.scatter([0],[1],marker='>',facecolor='none',edgecolor='xkcd:black',linewidths=2,label='self-motion stimulus',s=80) plt.legend(facecolor='xkcd:white') plt.show() else: if datasets is not None: print('datasets argument should be a dict') raise TypeError def my_plot_motion_signals(): dt = 1/10 a = gamma.pdf( np.arange(0,10,dt), 2.5, 0 ) t = np.arange(0,10,dt) v = np.cumsum(a*dt) fig, [ax1, ax2] = plt.subplots(nrows=1, ncols=2, sharex='col', sharey='row', figsize=(14,6)) fig.suptitle('Sensory ground truth') ax1.set_title('world-motion condition') ax1.plot(t,-v,label='visual [$m/s$]') ax1.plot(t,np.zeros(a.size),label='vestibular [$m/s^2$]') ax1.set_xlabel('time [s]') ax1.set_ylabel('motion') ax1.legend(facecolor='xkcd:white') ax2.set_title('self-motion condition') ax2.plot(t,-v,label='visual 
[$m/s$]') ax2.plot(t,a,label='vestibular [$m/s^2$]') ax2.set_xlabel('time [s]') ax2.set_ylabel('motion') ax2.legend(facecolor='xkcd:white') plt.show() def my_plot_sensorysignals(judgments, opticflow, vestibular, returnaxes=False, addaverages=False): wm_idx = np.where(judgments[:,0] == 0) sm_idx = np.where(judgments[:,0] == 1) opticflow = opticflow.transpose() wm_opticflow = np.squeeze(opticflow[:,wm_idx]) sm_opticflow = np.squeeze(opticflow[:,sm_idx]) vestibular = vestibular.transpose() wm_vestibular = np.squeeze(vestibular[:,wm_idx]) sm_vestibular = np.squeeze(vestibular[:,sm_idx]) X = np.arange(0,10,.1) fig, my_axes = plt.subplots(nrows=2, ncols=2, sharex='col', sharey='row', figsize=(15,10)) fig.suptitle('Sensory signals') my_axes[0][0].plot(X,wm_opticflow, color='xkcd:light red', alpha=0.1) my_axes[0][0].plot([0,10], [0,0], ':', color='xkcd:black') if addaverages: my_axes[0][0].plot(X,np.average(wm_opticflow, axis=1), color='xkcd:red', alpha=1) my_axes[0][0].set_title('world-motion optic flow') my_axes[0][0].set_ylabel('[motion]') my_axes[0][1].plot(X,sm_opticflow, color='xkcd:azure', alpha=0.1) my_axes[0][1].plot([0,10], [0,0], ':', color='xkcd:black') if addaverages: my_axes[0][1].plot(X,np.average(sm_opticflow, axis=1), color='xkcd:blue', alpha=1) my_axes[0][1].set_title('self-motion optic flow') my_axes[1][0].plot(X,wm_vestibular, color='xkcd:light red', alpha=0.1) my_axes[1][0].plot([0,10], [0,0], ':', color='xkcd:black') if addaverages: my_axes[1][0].plot(X,np.average(wm_vestibular, axis=1), color='xkcd:red', alpha=1) my_axes[1][0].set_title('world-motion vestibular signal') my_axes[1][0].set_xlabel('time [s]') my_axes[1][0].set_ylabel('[motion]') my_axes[1][1].plot(X,sm_vestibular, color='xkcd:azure', alpha=0.1) my_axes[1][1].plot([0,10], [0,0], ':', color='xkcd:black') if addaverages: my_axes[1][1].plot(X,np.average(sm_vestibular, axis=1), color='xkcd:blue', alpha=1) my_axes[1][1].set_title('self-motion vestibular signal') my_axes[1][1].set_xlabel('time [s]') if returnaxes: return my_axes else: plt.show() def my_plot_thresholds(thresholds, world_prop, self_prop, prop_correct): plt.figure(figsize=(12,8)) plt.title('threshold effects') plt.plot([min(thresholds),max(thresholds)],[0,0],':',color='xkcd:black') plt.plot([min(thresholds),max(thresholds)],[0.5,0.5],':',color='xkcd:black') plt.plot([min(thresholds),max(thresholds)],[1,1],':',color='xkcd:black') plt.plot(thresholds, world_prop, label='world motion') plt.plot(thresholds, self_prop, label='self motion') plt.plot(thresholds, prop_correct, color='xkcd:purple', label='correct classification') plt.xlabel('threshold') plt.ylabel('proportion correct or classified as self motion') plt.legend(facecolor='xkcd:white') plt.show() def my_plot_predictions_data(judgments, predictions): conditions = np.concatenate((np.abs(judgments[:,1]),np.abs(judgments[:,2]))) veljudgmnt = np.concatenate((judgments[:,3],judgments[:,4])) velpredict = np.concatenate((predictions[:,3],predictions[:,4])) # self: conditions_self = np.abs(judgments[:,1]) veljudgmnt_self = judgments[:,3] velpredict_self = predictions[:,3] # world: conditions_world = np.abs(judgments[:,2]) veljudgmnt_world = judgments[:,4] velpredict_world = predictions[:,4] fig, [ax1, ax2] = plt.subplots(nrows=1, ncols=2, sharey='row', figsize=(12,5)) ax1.scatter(veljudgmnt_self,velpredict_self, alpha=0.2) ax1.plot([0,1],[0,1],':',color='xkcd:black') ax1.set_title('self-motion judgments') ax1.set_xlabel('observed') ax1.set_ylabel('predicted') ax2.scatter(veljudgmnt_world,velpredict_world, 
alpha=0.2) ax2.plot([0,1],[0,1],':',color='xkcd:black') ax2.set_title('world-motion judgments') ax2.set_xlabel('observed') ax2.set_ylabel('predicted') plt.show() #@title Data generation code (needs to go on OSF and deleted here) def my_simulate_data(repetitions=100, conditions=[(0,-1),(+1,0)] ): """ Generate simulated data for this tutorial. You do not need to run this yourself. Args: repetitions: (int) number of repetitions of each condition (default: 30) conditions: list of 2-tuples of floats, indicating the self velocity and world velocity in each condition (default: returns data that is good for exploration: [(-1,0),(0,+1)] but can be flexibly extended) The total number of trials used (ntrials) is equal to: repetitions * len(conditions) Returns: dict with three entries: 'judgments': ntrials * 5 matrix 'opticflow': ntrials * 100 matrix 'vestibular': ntrials * 100 matrix The default settings would result in data where first 30 trials reflect a situation where the world (other train) moves in one direction, supposedly at 1 m/s (perhaps to the left: -1) while the participant does not move at all (0), and 30 trials from a second condition, where the world does not move, while the participant moves with 1 m/s in the opposite direction from where the world is moving in the first condition (0,+1). The optic flow should be the same, but the vestibular input is not. """ # reproducible output np.random.seed(1937) # set up some variables: ntrials = repetitions * len(conditions) # the following arrays will contain the simulated data: judgments = np.empty(shape=(ntrials,5)) opticflow = np.empty(shape=(ntrials,100)) vestibular = np.empty(shape=(ntrials,100)) # acceleration: a = gamma.pdf(np.arange(0,10,.1), 2.5, 0 ) # divide by 10 so that velocity scales from 0 to 1 (m/s) # max acceleration ~ .308 m/s^2 # not realistic! should be about 1/10 of that # velocity: v = np.cumsum(a*.1) # position: (not necessary) #x = np.cumsum(v) ################################# # REMOVE ARBITRARY SCALING & CORRECT NOISE PARAMETERS vest_amp = 1 optf_amp = 1 # we start at the first trial: trialN = 0 # we start with only a single velocity, but it should be possible to extend this for conditionno in range(len(conditions)): condition = conditions[conditionno] for repetition in range(repetitions): # # generate optic flow signal OF = v * np.diff(condition) # optic flow: difference between self & world motion OF = (OF * optf_amp) # fairly large spike range OF = OF + (np.random.randn(len(OF)) * .1) # adding noise # generate vestibular signal VS = a * condition[0] # vestibular signal: only self motion VS = (VS * vest_amp) # less range VS = VS + (np.random.randn(len(VS)) * 1.) # acceleration is a smaller signal, what is a good noise level? 
# store in matrices, corrected for sign #opticflow[trialN,:] = OF * -1 if (np.sign(np.diff(condition)) < 0) else OF #vestibular[trialN,:] = VS * -1 if (np.sign(condition[1]) < 0) else VS opticflow[trialN,:], vestibular[trialN,:] = OF, VS ######################################################### # store conditions in judgments matrix: judgments[trialN,0:3] = [ conditionno, condition[0], condition[1] ] # vestibular SD: 1.0916052957046194 and 0.9112684509277528 # visual SD: 0.10228834313079663 and 0.10975472557444346 # generate judgments: if (abs(np.average(np.cumsum(medfilt(VS/vest_amp,5)*.1)[70:90])) < 1): ########################### # NO self motion detected ########################### selfmotion_weights = np.array([.01,.01]) # there should be low/no self motion worldmotion_weights = np.array([.01,.99]) # world motion is dictated by optic flow else: ######################## # self motion DETECTED ######################## #if (abs(np.average(np.cumsum(medfilt(VS/vest_amp,15)*.1)[70:90]) - np.average(medfilt(OF,15)[70:90])) < 5): if True: #################### # explain all self motion by optic flow selfmotion_weights = np.array([.01,.99]) # there should be lots of self motion, but determined by optic flow worldmotion_weights = np.array([.01,.01]) # very low world motion? else: # we use both optic flow and vestibular info to explain both selfmotion_weights = np.array([ 1, 0]) # motion, but determined by vestibular signal worldmotion_weights = np.array([ 1, 1]) # very low world motion? # integrated_signals = np.array([ np.average( np.cumsum(medfilt(VS/vest_amp,15))[90:100]*.1 ), np.average((medfilt(OF/optf_amp,15))[90:100]) ]) selfmotion = np.sum(integrated_signals * selfmotion_weights) worldmotion = np.sum(integrated_signals * worldmotion_weights) #print(worldmotion,selfmotion) judgments[trialN,3] = abs(selfmotion) judgments[trialN,4] = abs(worldmotion) # this ends the trial loop, so we increment the counter: trialN += 1 return {'judgments':judgments, 'opticflow':opticflow, 'vestibular':vestibular} simulated_data = my_simulate_data() judgments = simulated_data['judgments'] opticflow = simulated_data['opticflow'] vestibular = simulated_data['vestibular'] ``` #Micro-tutorial 6 - planning the model ``` #@title Video: Planning the model from IPython.display import YouTubeVideo video = YouTubeVideo(id='daEtkVporBE', width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video ``` ###**Goal:** Identify the key components of the model and how they work together. Our goal all along has been to model our perceptual estimates of sensory data. Now that we have some idea of what we want to do, we need to line up the components of the model: what are the input and output? Which computations are done and in what order? The figure below shows a generic model we will use to guide our code construction. ![Model as code](https://i.ibb.co/hZdHmkk/modelfigure.jpg) Our model will have: * **inputs**: the values the system has available - for this tutorial the sensory information in a trial. We want to gather these together and plan how to process them. * **parameters**: unless we are lucky, our functions will have unknown parameters - we want to identify these and plan for them. * **outputs**: these are the predictions our model will make - for this tutorial these are the perceptual judgments on each trial. Ideally these are directly comparable to our data. * **Model functions**: A set of functions that perform the hypothesized computations. 
>Using Python (with Numpy and Scipy) we will define a set of functions that take our data and some parameters as input, can run our model, and output a prediction for the judgment data. #Recap of what we've accomplished so far: To model perceptual estimates from our sensory data, we need to 1. _integrate_ to ensure sensory information are in appropriate units 2. _reduce noise and set timescale_ by filtering 3. _threshold_ to model detection Remember the kind of operations we identified: * integration: `np.cumsum()` * filtering: `my_moving_window()` * threshold: `if` with a comparison (`>` or `<`) and `else` We will collect all the components we've developed and design the code by: 1. **identifying the key functions** we need 2. **sketching the operations** needed in each. **_Planning our model:_** We know what we want the model to do, but we need to plan and organize the model into functions and operations. We're providing a draft of the first function. For each of the two other code chunks, write mostly comments and help text first. This should put into words what role each of the functions plays in the overall model, implementing one of the steps decided above. _______ Below is the main function with a detailed explanation of what the function is supposed to do: what input is expected, and what output will generated. The code is not complete, and only returns nans for now. However, this outlines how most model code works: it gets some measured data (the sensory signals) and a set of parameters as input, and as output returns a prediction on other measured data (the velocity judgments). The goal of this function is to define the top level of a simulation model which: * receives all input * loops through the cases * calls functions that computes predicted values for each case * outputs the predictions ### **TD 6.1**: Complete main model function The function `my_train_illusion_model()` below should call one other function: `my_perceived_motion()`. What input do you think this function should get? **Complete main model function** ``` def my_train_illusion_model(sensorydata, params): ''' Generate output predictions of perceived self-motion and perceived world-motion velocity based on input visual and vestibular signals. 
Args (Input variables passed into function): sensorydata: (dict) dictionary with two named entries: opticflow: (numpy.ndarray of float) NxM array with N trials on rows and M visual signal samples in columns vestibular: (numpy.ndarray of float) NxM array with N trials on rows and M vestibular signal samples in columns params: (dict) dictionary with named entries: threshold: (float) vestibular threshold for credit assignment filterwindow: (list of int) determines the strength of filtering for the visual and vestibular signals, respectively integrate (bool): whether to integrate the vestibular signals, will be set to True if absent FUN (function): function used in the filter, will be set to np.mean if absent samplingrate (float): the number of samples per second in the sensory data, will be set to 10 if absent Returns: dict with two entries: selfmotion: (numpy.ndarray) vector array of length N, with predictions of perceived self motion worldmotion: (numpy.ndarray) vector array of length N, with predictions of perceived world motion ''' # sanitize input a little if not('FUN' in params.keys()): params['FUN'] = np.mean if not('integrate' in params.keys()): params['integrate'] = True if not('samplingrate' in params.keys()): params['samplingrate'] = 10 # number of trials: ntrials = sensorydata['opticflow'].shape[0] # set up variables to collect output selfmotion = np.empty(ntrials) worldmotion = np.empty(ntrials) # loop through trials? for trialN in range(ntrials): #these are our sensory variables (inputs) vis = sensorydata['opticflow'][trialN,:] ves = sensorydata['vestibular'][trialN,:] ######################################################## # generate output predicted perception: ######################################################## #our inputs our vis, ves, and params selfmotion[trialN], worldmotion[trialN] = [np.nan, np.nan] ######################################################## # replace above with # selfmotion[trialN], worldmotion[trialN] = my_perceived_motion( ???, ???, params=params) # and fill in question marks ######################################################## # comment this out when you've filled raise NotImplementedError("Student excercise: generate predictions") return {'selfmotion':selfmotion, 'worldmotion':worldmotion} # uncomment the following lines to run the main model function: ## here is a mock version of my_perceived motion. ## so you can test my_train_illusion_model() #def my_perceived_motion(*args, **kwargs): #return np.random.rand(2) ##let's look at the preditions we generated for two sample trials (0,100) ##we should get a 1x2 vector of self-motion prediction and another for world-motion #sensorydata={'opticflow':opticflow[[0,100],:0], 'vestibular':vestibular[[0,100],:0]} #params={'threshold':0.33, 'filterwindow':[100,50]} #my_train_illusion_model(sensorydata=sensorydata, params=params) ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D2_ModelingPractice/solutions/W1D2_Tutorial2_Solution_685e0a13.py) ### **TD 6.2**: Draft perceived motion functions Now we draft a set of functions, the first of which is used in the main model function (see above) and serves to generate perceived velocities. The other two are used in the first one. Only write help text and/or comments, you don't have to write the whole function. Each time ask yourself these questions: * what sensory data is necessary? * what other input does the function need, if any? * which operations are performed on the input? * what is the output? 
(the number of arguments is correct) **Template perceived motion** ``` # fill in the input arguments the function should have: # write the help text for the function: def my_perceived_motion(arg1, arg2, arg3): ''' Short description of the function Args: argument 1: explain the format and content of the first argument argument 2: explain the format and content of the second argument argument 3: explain the format and content of the third argument Returns: what output does the function generate? Any further description? ''' # structure your code into two functions: "my_selfmotion" and "my_worldmotion" # write comments outlining the operations to be performed on the inputs by each of these functions # use the elements from micro-tutorials 3, 4, and 5 (found in W1D2 Tutorial Part 1) # # # # what kind of output should this function produce? return output ``` We've completed the `my_perceived_motion()` function for you below. Follow this example to complete the template for `my_selfmotion()` and `my_worldmotion()`. Write out the inputs and outputs, and the steps required to calculate the outputs from the inputs. **Perceived motion function** ``` #Full perceived motion function def my_perceived_motion(vis, ves, params): ''' Takes sensory data and parameters and returns predicted percepts Args: vis (numpy.ndarray): 1xM array of optic flow velocity data ves (numpy.ndarray): 1xM array of vestibular acceleration data params: (dict) dictionary with named entries: see my_train_illusion_model() for details Returns: [list of floats]: prediction for perceived self-motion based on vestibular data, and prediction for perceived world-motion based on perceived self-motion and visual data ''' # estimate self motion based on only the vestibular data # pass on the parameters selfmotion = my_selfmotion(ves=ves, params=params) # estimate the world motion, based on the selfmotion and visual data # pass on the parameters as well worldmotion = my_worldmotion(vis=vis, selfmotion=selfmotion, params=params) return [selfmotion, worldmotion] ``` **Template calculate self motion** Put notes in the function below that describe the inputs, the outputs, and steps that transform the output from the input using elements from micro-tutorials 3,4,5. ``` def my_selfmotion(arg1, arg2): ''' Short description of the function Args: argument 1: explain the format and content of the first argument argument 2: explain the format and content of the second argument Returns: what output does the function generate? Any further description? ''' # what operations do we perform on the input? # use the elements from micro-tutorials 3, 4, and 5 # 1. # 2. # 3. # 4. # what output should this function produce? return output ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D2_ModelingPractice/solutions/W1D2_Tutorial2_Solution_181325a9.py) **Template calculate world motion** Put notes in the function below that describe the inputs, the outputs, and steps that transform the output from the input using elements from micro-tutorials 3,4,5. ``` def my_worldmotion(arg1, arg2, arg3): ''' Short description of the function Args: argument 1: explain the format and content of the first argument argument 2: explain the format and content of the second argument argument 3: explain the format and content of the third argument Returns: what output does the function generate? Any further description? ''' # what operations do we perform on the input? # use the elements from micro-tutorials 3, 4, and 5 # 1. # 2. 
# 3. # what output should this function produce? return output ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D2_ModelingPractice/solutions/W1D2_Tutorial2_Solution_8f913582.py) #Micro-tutorial 7 - implement model ``` #@title Video: implement the model from IPython.display import YouTubeVideo video = YouTubeVideo(id='gtSOekY8jkw', width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video ``` **Goal:** We write the components of the model in actual code. For the operations we picked, there are functions ready to use: * integration: `np.cumsum(data, axis=1)` (axis=1: per trial and over samples) * filtering: `my_moving_window(data, window)` (window: int, default 3) * average: `np.mean(data)` * threshold: if (value > thr): <operation 1> else: <operation 2> ###**TD 7.1:** Write code to estimate self motion Use the operations to finish writing the function that will calculate an estimate of self motion. Fill in the descriptive list of items with actual operations. Use the function for estimating world motion below, which we've filled in for you, as an example! **Template finish self motion function** ``` def my_selfmotion(ves, params): ''' Estimates self motion for one vestibular signal Args: ves (numpy.ndarray): 1xM array with a vestibular signal params (dict): dictionary with named entries: see my_train_illusion_model() for details Returns: (float): an estimate of self motion in m/s ''' ###uncomment the code below and fill in with your code ## 1. integrate vestibular signal #ves = np.cumsum(ves*(1/params['samplingrate'])) ## 2. running window function to accumulate evidence: #selfmotion = YOUR CODE HERE ## 3. take final value of self-motion vector as our estimate #selfmotion = ## 4. compare to threshold. Hint: the threshold is stored in params['threshold'] ## if selfmotion is higher than threshold: return value ## if it's lower than threshold: return 0 #if YOURCODEHERE #selfmotion = YOURCODEHERE # comment this out when you've filled raise NotImplementedError("Student exercise: estimate my_selfmotion") return output ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D2_ModelingPractice/solutions/W1D2_Tutorial2_Solution_3ea16348.py) ### Estimate world motion We have completed the `my_worldmotion()` function for you. **World motion function** ``` # World motion function def my_worldmotion(vis, selfmotion, params): ''' Estimates world motion based on the optic flow signal and the estimate of self motion Args: vis (numpy.ndarray): 1xM array with the optic flow signal selfmotion (float): estimate of self motion params (dict): dictionary with named entries: see my_train_illusion_model() for details Returns: (float): an estimate of world motion in m/s ''' # running average to smooth/accumulate sensory evidence visualmotion = my_moving_window(vis, window=params['filterwindows'][1], FUN=np.mean) # take final value visualmotion = visualmotion[-1] # add the self-motion estimate back onto the visual estimate worldmotion = visualmotion + selfmotion # return final value return worldmotion ``` #Micro-tutorial 8 - completing the model ``` #@title Video: completing the model from IPython.display import YouTubeVideo video = YouTubeVideo(id='-NiHSv4xCDs', width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video ``` **Goal:** Make sure the model can speak to the hypothesis. Eliminate all the parameters that do not speak to the hypothesis.
Now that we have a working model, we can keep improving it, but at some point we need to decide that it is finished. Once we have a model that displays the properties of a system we are interested in, it should be possible to say something about our hypothesis and question. Keeping the model simple makes it easier to understand the phenomenon and answer the research question. Here that means that our model should have illusory perception, and perhaps make similar judgments to those of the participants, but not much more. To test this, we will run the model, store the output and plot the models' perceived self motion over perceived world motion, like we did with the actual perceptual judgments (it even uses the same plotting function). ### **TD 8.1:** See if the model produces illusions ``` #@title Run to plot model predictions of motion estimates # prepare to run the model again: data = {'opticflow':opticflow, 'vestibular':vestibular} params = {'threshold':0.6, 'filterwindows':[100,50], 'FUN':np.mean} modelpredictions = my_train_illusion_model(sensorydata=data, params=params) # process the data to allow plotting... predictions = np.zeros(judgments.shape) predictions[:,0:3] = judgments[:,0:3] predictions[:,3] = modelpredictions['selfmotion'] predictions[:,4] = modelpredictions['worldmotion'] *-1 my_plot_percepts(datasets={'predictions':predictions}, plotconditions=True) ``` **Questions:** * Why is the data distributed this way? How does it compare to the plot in TD 1.2? * Did you expect to see this? * Where do the model's predicted judgments for each of the two conditions fall? * How does this compare to the behavioral data? However, the main observation should be that **there are illusions**: the blue and red data points are mixed in each of the two sets of data. Does this mean the model can help us understand the phenomenon? #Micro-tutorial 9 - testing and evaluating the model ``` #@title Video: Background from IPython.display import YouTubeVideo video = YouTubeVideo(id='5vnDOxN3M_k', width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video ``` **Goal:** Once we have finished the model, we need a description of how good it is. The question and goals we set in micro-tutorial 1 and 4 help here. There are multiple ways to evaluate a model. Aside from the obvious fact that we want to get insight into the phenomenon that is not directly accessible without the model, we always want to quantify how well the model agrees with the data. ### Quantify model quality with $R^2$ Let's look at how well our model matches the actual judgment data. ``` #@title Run to plot predictions over data my_plot_predictions_data(judgments, predictions) ``` When model predictions are correct, the red points in the figure above should lie along the identity line (a dotted black line here). Points off the identity line represent model prediction errors. While in each plot we see two clusters of dots that are fairly close to the identity line, there are also two clusters that are not. For the trials that those points represent, the model has an illusion while the participants don't or vice versa. We will use a straightforward, quantitative measure of how good the model is: $R^2$ (pronounced: "R-squared"), which can take values between 0 and 1, and expresses how much variance is explained by the relationship between two variables (here the model's predictions and the actual judgments). 
It is also called [coefficient of determination](https://en.wikipedia.org/wiki/Coefficient_of_determination), and is calculated here as the square of the correlation coefficient (r or $\rho$). Just run the chunk below: ``` #@title Run to calculate R^2 conditions = np.concatenate((np.abs(judgments[:,1]),np.abs(judgments[:,2]))) veljudgmnt = np.concatenate((judgments[:,3],judgments[:,4])) velpredict = np.concatenate((predictions[:,3],predictions[:,4])) slope, intercept, r_value, p_value, std_err = sp.stats.linregress(conditions,veljudgmnt) print('conditions -> judgments R^2: %0.3f'%( r_value**2 )) slope, intercept, r_value, p_value, std_err = sp.stats.linregress(veljudgmnt,velpredict) print('predictions -> judgments R^2: %0.3f'%( r_value**2 )) ``` These $R^2$s express how well the experimental conditions explain the participants judgments and how well the models predicted judgments explain the participants judgments. You will learn much more about model fitting, quantitative model evaluation and model comparison tomorrow! Perhaps the $R^2$ values don't seem very impressive, but the judgments produced by the participants are explained by the model's predictions better than by the actual conditions. In other words: the model tends to have the same illusions as the participants. ### **TD 9.1** Varying the threshold parameter to improve the model In the code below, see if you can find a better value for the threshold parameter, to reduce errors in the models' predictions. **Testing thresholds** ``` # Testing thresholds def test_threshold(threshold=0.33): # prepare to run model data = {'opticflow':opticflow, 'vestibular':vestibular} params = {'threshold':threshold, 'filterwindows':[100,50], 'FUN':np.mean} modelpredictions = my_train_illusion_model(sensorydata=data, params=params) # get predictions in matrix predictions = np.zeros(judgments.shape) predictions[:,0:3] = judgments[:,0:3] predictions[:,3] = modelpredictions['selfmotion'] predictions[:,4] = modelpredictions['worldmotion'] *-1 # get percepts from participants and model conditions = np.concatenate((np.abs(judgments[:,1]),np.abs(judgments[:,2]))) veljudgmnt = np.concatenate((judgments[:,3],judgments[:,4])) velpredict = np.concatenate((predictions[:,3],predictions[:,4])) # calculate R2 slope, intercept, r_value, p_value, std_err = sp.stats.linregress(veljudgmnt,velpredict) print('predictions -> judgments R2: %0.3f'%( r_value**2 )) test_threshold(threshold=0.5) ``` ### **TD 9.2:** Credit assigmnent of self motion When we look at the figure in **TD 8.1**, we can see a cluster does seem very close to (1,0), just like in the actual data. The cluster of points at (1,0) are from the case where we conclude there is no self motion, and then set the self motion to 0. That value of 0 removes a lot of noise from the world-motion estimates, and all noise from the self-motion estimate. In the other case, where there is self motion, we still have a lot of noise (see also micro-tutorial 4). Let's change our `my_selfmotion()` function to return a self motion of 1 when the vestibular signal indicates we are above threshold, and 0 when we are below threshold. Edit the function here. 
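Before editing the template below, it may help to see the intended rule in isolation. The snippet that follows is only a toy sketch of the idea (the helper name `binary_selfmotion` and the example numbers are made up for illustration and are not the tutorial's solution): above the vestibular threshold we attribute the full motion to ourselves, below it we attribute none of it.

```
# Toy sketch of the credit-assignment rule (illustration only, not the exercise solution)
def binary_selfmotion(estimate, threshold):
    # above threshold: conclude we moved, so report the full motion (1 m/s in this experiment)
    # at or below threshold: conclude we did not move, so report 0
    return 1.0 if estimate > threshold else 0.0

print(binary_selfmotion(0.8, 0.33))  # prints 1.0
print(binary_selfmotion(0.1, 0.33))  # prints 0.0
```

The template below asks you to apply the same else-branch logic inside `my_selfmotion()`.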
**Template function for credit assignment of self motion** ``` # Template binary self-motion estimates def my_selfmotion(ves, params): ''' Estimates self motion for one vestibular signal Args: ves (numpy.ndarray): 1xM array with a vestibular signal params (dict): dictionary with named entries: see my_train_illusion_model() for details Returns: (float): an estimate of self motion in m/s ''' # integrate signal: ves = np.cumsum(ves*(1/params['samplingrate'])) # use running window to accumulate evidence: selfmotion = my_moving_window(ves, window=params['filterwindows'][0], FUN=params['FUN']) ## take the final value as our estimate: selfmotion = selfmotion[-1] ########################################## # this last part will have to be changed # compare to threshold, set to 0 if lower and else... if selfmotion < params['threshold']: selfmotion = 0 #uncomment the lines below and fill in with your code #else: #YOUR CODE HERE # comment this out when you've filled raise NotImplementedError("Student exercise: modify with credit assignment") return selfmotion ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D2_ModelingPractice/solutions/W1D2_Tutorial2_Solution_90571e21.py) The function you just wrote will be used when we run the model again below. ``` #@title Run model credit assignment of self motion # prepare to run the model again: data = {'opticflow':opticflow, 'vestibular':vestibular} params = {'threshold':0.33, 'filterwindows':[100,50], 'FUN':np.mean} modelpredictions = my_train_illusion_model(sensorydata=data, params=params) # now process the data to allow plotting... predictions = np.zeros(judgments.shape) predictions[:,0:3] = judgments[:,0:3] predictions[:,3] = modelpredictions['selfmotion'] predictions[:,4] = modelpredictions['worldmotion'] *-1 my_plot_percepts(datasets={'predictions':predictions}, plotconditions=False) ``` That looks much better, and closer to the actual data. Let's see if the $R^2$ values have improved: ``` #@title Run to calculate R^2 for model with self motion credit assignment conditions = np.concatenate((np.abs(judgments[:,1]),np.abs(judgments[:,2]))) veljudgmnt = np.concatenate((judgments[:,3],judgments[:,4])) velpredict = np.concatenate((predictions[:,3],predictions[:,4])) my_plot_predictions_data(judgments, predictions) slope, intercept, r_value, p_value, std_err = sp.stats.linregress(conditions,veljudgmnt) print('conditions -> judgments R2: %0.3f'%( r_value**2 )) slope, intercept, r_value, p_value, std_err = sp.stats.linregress(velpredict,veljudgmnt) print('predictions -> judgments R2: %0.3f'%( r_value**2 )) ``` While the model still predicts velocity judgments better than the conditions (i.e. the model predicts illusions in somewhat similar cases), the $R^2$ values are actually worse than those of the simpler model. What's really going on is that the same set of points that were model prediction errors in the previous model are also errors here. All we have done is reduce the spread. ### Interpret the model's meaning Here's what you should have learned: 1. A noisy vestibular acceleration signal can give rise to illusory motion. 2. However, disambiguating the optic flow by adding the vestibular signal simply adds a lot of noise (see the short numerical sketch after this list). This is not a plausible thing for the brain to do. 3. Our other hypothesis - credit assignment - is more qualitatively correct, but our simulations were not able to match the frequency of the illusion on a trial-by-trial basis.
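To make point 2 concrete, here is a small numerical sketch. The noise levels are taken from the data-generation code above (a standard deviation of roughly 0.1 for the visual signal and 1.0 for the vestibular signal); everything else is made up for illustration. Naively averaging a clean visual estimate with a much noisier vestibular-based estimate mostly just inflates the error:

```
# Illustration for point 2: mixing in a much noisier signal mainly adds noise
import numpy as np

np.random.seed(0)
true_velocity = np.linspace(0, 1, 100)                    # the quantity both signals estimate
visual = true_velocity + np.random.randn(100) * 0.1       # low-noise visual estimate
vestibular = true_velocity + np.random.randn(100) * 1.0   # high-noise vestibular-based estimate
combined = 0.5 * (visual + vestibular)                    # naive (unweighted) combination

print(np.std(visual - true_velocity))    # error spread using vision alone: ~0.1
print(np.std(combined - true_velocity))  # error spread after adding the vestibular signal: ~0.5
```

A principled cue-combination model would weight each signal by its reliability instead of averaging, which is one of the refinements mentioned below.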
_It's always possible to refine our models to improve the fits._ There are many ways to try to do this. A few examples: we could implement a full sensory cue integration model, perhaps with Kalman filters (Week 2, Day 3), or we could add prior knowledge (at what time do the trains depart?). However, we decided that for now we have learned enough, so it's time to write it up. # Micro-tutorial 10 - publishing the model ``` #@title Video: Background from IPython.display import YouTubeVideo video = YouTubeVideo(id='kf4aauCr5vA', width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video ``` **Goal:** In order for our model to impact the field, it needs to be accepted by our peers, and in order for that to happen it matters how the model is published. ### **TD 10.1:** Write a summary of the project Here we will write up our model by answering the following questions: * **What is the phenomenon?** Here summarize the part of the phenomenon which your model addresses. * **What is the key scientific question?**: Clearly articulate the question which your model tries to answer. * **What was our hypothesis?**: Explain the key relationships which we relied on to simulate the phenomenon. * **How did your model work?** Give an overview of the model, its main components, and how the model works. ''Here we ... '' * **What did we find? Did the model work?** Explain the key outcomes of your model evaluation. * **What can we conclude?** Conclude as much as you can _with reference to the hypothesis_, within the limits of the model. * **What did you learn? What is left to be learned?** Briefly argue the plausibility of the approach and what you think is _essential_ that may have been left out. ### Guidance for the future There are good guidelines for structuring and writing an effective paper (e.g. [Mensh & Kording, 2017](https://doi.org/10.1371/journal.pcbi.1005619)), all of which apply to papers about models. There are some extra considerations when publishing a model. In general, you should explain each of the steps in the paper: **Introduction:** Steps 1 & 2 (maybe 3) **Methods:** Steps 3-7, 9 **Results:** Steps 8 & 9, going back to 1, 2 & 4 In addition, you should provide a visualization of the model, and upload the code implementing the model and the data it was trained and tested on to a repository (e.g. GitHub and OSF). The audience for all of this should be experimentalists, as they are the ones who can test predictions made by your model and collect new data. This way your models can impact future experiments, and that future data can then be modeled (see modeling process schematic below). Remember your audience - it is _always_ hard to clearly convey the main points of your work to others, especially if your audience doesn't necessarily create computational models themselves. ![how-to-model process from Blohm et al 2019](https://deniseh.lab.yorku.ca/files/2020/06/HowToModel-ENEURO.0352-19.2019.full_.pdf.png) ### Suggestion For every modeling project, a very good exercise is to _**first**_ write a short, 100-word abstract of the project plan and expected impact, like the summary you wrote. This forces you to focus on the main points: describing the relevance, question, model, answer and what it all means very succinctly. This allows you to decide to do this project or not **before you commit time writing code for no good purpose**. Notice that this is really what we've walked you through carefully in this tutorial!
:) # Post-script Note that the model we built here was extremely simple and used artificial data on purpose. It allowed us to go through all the steps of building a model, and hopefully you noticed that it is not always a linear process: you will go back to different steps if you hit a roadblock somewhere. However, if you're interested in how to actually approach modeling a similar phenomenon in a probabilistic way, we encourage you to read the paper by [Dokka et al., 2019](https://doi.org/10.1073/pnas.1820373116), where the authors model how judgments of heading direction are influenced by objects that are also moving. # Reading Blohm G, Kording KP, Schrater PR (2020). _A How-to-Model Guide for Neuroscience._ eNeuro, 7(1) ENEURO.0352-19.2019. https://doi.org/10.1523/ENEURO.0352-19.2019 Dokka K, Park H, Jansen M, DeAngelis GC, Angelaki DE (2019). _Causal inference accounts for heading perception in the presence of object motion._ PNAS, 116(18):9060-9065. https://doi.org/10.1073/pnas.1820373116 Drugowitsch J, DeAngelis GC, Klier EM, Angelaki DE, Pouget A (2014). _Optimal Multisensory Decision-Making in a Reaction-Time Task._ eLife, 3:e03005. https://doi.org/10.7554/eLife.03005 Hartmann M, Haller K, Moser I, Hossner E-J, Mast FW (2014). _Direction detection thresholds of passive self-motion in artistic gymnasts._ Exp Brain Res, 232:1249-1258. https://doi.org/10.1007/s00221-014-3841-0 Mensh B, Kording K (2017). _Ten simple rules for structuring papers._ PLoS Comput Biol, 13(9):e1005619. https://doi.org/10.1371/journal.pcbi.1005619 Seno T, Fukuda H (2012). _Stimulus Meanings Alter Illusory Self-Motion (Vection) - Experimental Examination of the Train Illusion._ Seeing Perceiving, 25(6):631-45. https://doi.org/10.1163/18784763-00002394
# Welcome to nbdev > Create delightful python projects using Jupyter Notebooks - image:images/nbdev_source.gif `nbdev` is a library that allows you to develop a python library in [Jupyter Notebooks](https://jupyter.org/), putting all your code, tests and documentation in one place. That is: you now have a true [literate programming](https://en.wikipedia.org/wiki/Literate_programming) environment, as envisioned by Donald Knuth back in 1983! `nbdev` makes debugging and refactor your code much easier relative to traditional programming environments. Furthermore, using nbdev promotes software engineering best practices because tests and documentation are first class citizens. ## Features of Nbdev `nbdev` provides the following tools for developers: - **Automatically generate docs** from Jupyter notebooks. These docs are searchable and automatically hyperlinked to appropriate documentation pages by introspecting keywords you surround in backticks. - Utilities to **automate the publishing of pypi and conda packages** including version number management. - A robust, **two-way sync between notebooks and source code**, which allow you to use your IDE for code navigation or quick edits if desired. - **Fine-grained control on hiding/showing cells**: you can choose to hide entire cells, just the output, or just the input. Furthermore, you can embed cells in collapsible elements that are open or closed by default. - Ability to **write tests directly in notebooks** without having to learn special APIs. These tests get executed in parallel with a single CLI command. You can even define certain groups of tests such that you don't have to always run long-running tests. - Tools for **merge/conflict resolution** with notebooks in a **human readable format**. - **Continuous integration (CI) comes setup for you with [GitHub Actions](https://github.com/features/actions)** out of the box, that will run tests automatically for you. Even if you are not familiar with CI or GitHub Actions, this starts working right away for you without any manual intervention. - **Integration With GitHub Pages for docs hosting**: nbdev allows you to easily host your documentation for free, using GitHub pages. - Create Python modules, following **best practices such as automatically defining `__all__`** ([more details](http://xion.io/post/code/python-all-wild-imports.html)) with your exported functions, classes, and variables. - **Math equation support** with LaTeX. - ... and much more! See the [Getting Started](https://nbdev.fast.ai/#Getting-Started) section below for more information. ## A Motivating Example For example, lets define a class that represents a playing card, with associated docs and tests in a Jupyter Notebook: ![image.png](images/att_00027.png) In the above screenshot, we have code, tests and documentation in one context! `nbdev` renders this into searchable docs (which are optionally hosted for free on GitHub Pages). Below is an annotated screenshot of the generated docs for further explanation: ![image.png](images/att_00016.png) The above illustration is a subset of [this nbdev tutorial with a minimal example](https://nbdev.fast.ai/example.html), which uses code from [Think Python 2](https://github.com/AllenDowney/ThinkPython2) by Allen Downey. ### Explanation of annotations: 1. The heading **Card** corresponds to the first `H1` heading in a notebook with a note block _API Details_ as the summary. 2. `nbdev` automatically renders a Table of Contents for you. 3. 
`nbdev` automatically renders the signature of your class or function as a heading. 4. The cells where your code is defined will be hidden and replaced by standardized documentation of your function, showing its name, arguments, docstring, and link to the source code on github. 5. This part of docs is rendered automatically from the docstring. 6. The rest of the notebook is rendered as usual. You can hide entire cells, hide only cell input or hide only output by using the [flags described on this page](https://nbdev.fast.ai/export2html.html). 7. nbdev supports special block quotes that render as colored boxes in the documentation. You can read more about them [here](https://nbdev.fast.ai/export2html.html#add_jekyll_notes). In this specific example, we are using the `Note` block quote. 8. Words you surround in backticks will be automatically hyperlinked to the associated documentation where appropriate. This is a trivial case where `Card` class is defined immediately above, however this works across pages and modules. We will see another example of this in later steps. ## Installing nbdev is on PyPI and conda so you can just run `pip install nbdev` or `conda install -c fastai nbdev`. For an [editable install](https://stackoverflow.com/questions/35064426/when-would-the-e-editable-option-be-useful-with-pip-install), use the following: ``` git clone https://github.com/fastai/nbdev pip install -e nbdev ``` _Note that `nbdev` must be installed into the same python environment that you use for both your Jupyter Server and your workspace._ ## Getting Started The following are helpful resources for getting started with nbdev: - The [tutorial](https://nbdev.fast.ai/tutorial.html). - A [minimal, end-to-end example](https://nbdev.fast.ai/example.html) of using nbdev. We suggest replicating this example after reading through the tutorial to solidify your understanding. - The [docs](https://nbdev.fast.ai/). - [release notes](https://github.com/fastai/nbdev/blob/master/CHANGELOG.md). ## If Someone Tells You That You Shouldn't Use Notebooks For Software Development [Watch this video](https://youtu.be/9Q6sLbz37gk). ## Contributing If you want to contribute to `nbdev`, be sure to review the [contributions guidelines](https://github.com/fastai/nbdev/blob/master/CONTRIBUTING.md). This project adheres to fastai`s [code of conduct](https://github.com/fastai/nbdev/blob/master/CODE-OF-CONDUCT.md). By participating, you are expected to uphold this code. In general, the fastai project strives to abide by generally accepted best practices in open-source software development. Make sure you have the git hooks we use installed by running ``` nbdev_install_git_hooks ``` in the cloned repository folder. ## Copyright Copyright 2019 onwards, fast.ai, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this project's files except in compliance with the License. A copy of the License is provided in the LICENSE file in this repository. ## Appendix ### nbdev and fastai `nbdev` has been used to build innovative software used by many developers, such as [fastai](https://docs.fast.ai/), a deep learning library which implements a [unique layered api and callback system](https://arxiv.org/abs/2002.04688), and [fastcore](https://fastcore.fast.ai/), an extension to the Python programming language. Furthermore, `nbdev` allows a very small number of developers to maintain and grow a [large ecosystem](https://github.com/fastai) of software engineering, data science, machine learning and devops tools. 
Here, for instance, is how `combined_cos` is defined and documented in the `fastai` library: <img alt="Exporting from nbdev" width="700" caption="An example of a function defined in one cell (marked with the export flag) and explained, along with a visual example, in the following cells" src="images/export_example.png" />
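In plain text, the pattern in these screenshots boils down to a cell flagged for export followed by ordinary assert-based test cells. The cell below is only a toy sketch of that layout (the `add` function is invented for illustration; it is not fastai or nbdev code):

```
#export
def add(a, b):
    "Add two numbers (toy example, used only to show the nbdev cell layout)"
    return a + b

# an ordinary cell like this acts as a test: nbdev runs the notebook and fails on a broken assert
assert add(2, 3) == 5
```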
# Study Path And Where To Find Resources **Author: Yulun Wu** Welcome aboard! AI is one of the most prospective fields today. Personally I believe AI technology will start a technology revolution and totally revamp the world as well as our lives. The definition of AI is broad, in AIwaffle Courses, *AI*, *Machine Learning*, *Deep Learning* means similar things, since deep learning is the mostly focused subfield in ML, which is the basis of AI technology. To get started in Machine Learning, there is some basic skills you should acquire. ## Python If you don't know what is this, why are you reading? Go learn it first! ## Linear Algebra: vectors, matrix multiplication The AIwaffle Courses only require a subset of linear algebra. If you know vectors and matrix multiplication, you are good to go. Useful resources: https://www.khanacademy.org/math/linear-algebra https://brilliant.org/wiki/matrices/ ## Calculus: partial derivitives, gradients That's all you need for now. Useful resources: https://www.khanacademy.org/math/multivariable-calculus https://brilliant.org/wiki/partial-derivatives/ ## Libraries for ML **Must**: [Numpy](https://numpy.org/), [Pytorch](https://pytorch.org/) Graphing library like: [Matplotlib](https://matplotlib.org/) or its high-level API [Seaborn](http://seaborn.pydata.org/index.html) or others. Remember: You don't have to be proficient at them. The AIwaffle Courses will also lead you through Pytorch. The best is: if you are good at self-studying, skip AIwaffle Course 2-7 by going to pytorch.org ## Other Useful Resources Videos from 3b1b - Make sure to watch them before you start an AIwaffle Course! These videos give you an intuitive understanding on *Neural Networks* and *Deep Learning*. [Youtube](https://www.youtube.com/playlist?list=PLZHQObOWTQDNU6R1_67000Dx_ZCJB-3pi) | [bilibili](https://space.bilibili.com/88461692/channel/detail?cid=26587) [Google Machine Learning Glossary](https://developers.google.com/machine-learning/glossary/) for you geeks [A lot of ML Cheat Sheets](https://becominghuman.ai/cheat-sheets-for-ai-neural-networks-machine-learning-deep-learning-big-data-science-pdf-f22dc900d2d7): Most of them are useless. Use at your own risk. ## Where to ask questions Create an issue in our [Github repo](https://github.com/AIwaffle/AIwaffle) Ask in our Forum: TBD Send an email to [email protected] Enough talk, jump into the next AIwaffle Course **Pytorch: Tensor Manipulation** to get your hands dirty!
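As a tiny, hedged taste of the prerequisites above (this sketch is not part of any AIwaffle course), the snippet below multiplies two matrices and asks PyTorch for a gradient:

```
import torch

# linear algebra prerequisite: matrix multiplication
A = torch.tensor([[1., 2.], [3., 4.]])
B = torch.tensor([[5., 6.], [7., 8.]])
print(A @ B)  # 2x2 matrix product

# calculus prerequisite: gradients, computed here by autograd
x = torch.tensor(2.0, requires_grad=True)
y = x ** 2 + 3 * x  # y = x^2 + 3x
y.backward()        # compute dy/dx
print(x.grad)       # dy/dx = 2x + 3 = 7 at x = 2
```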
``` from nbdev import * %nbdev_default_export merge #export from nbdev.imports import * ``` # Fix merge conflicts > Fix merge conflicts in jupyter notebooks When working with jupyter notebooks (which are json files behind the scenes) and GitHub, it is very common that a merge conflict (that will add new lines in the notebook source file) will break some notebooks you are working on. This module defines the function `fix_conflicts` to fix those notebooks for you, and attempt to automatically merge standard conflicts. The remaining ones will be delimited by markdown cells like this: <img alt="Fixed notebook" width="700" caption="A notebook fixed after a merged conflict. The file couldn't be opened before the command was run, but after it the conflict is higlighted by markdown cells." src="images/merge.PNG" /> ## Walk cells ``` #hide tst_nb="""{ "cells": [ { "cell_type": "code", <<<<<<< HEAD "execution_count": 6, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "3" ] }, "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ "z=3\n", "z" ] }, { "cell_type": "code", "execution_count": 7, ======= "execution_count": 5, >>>>>>> a7ec1b0bfb8e23b05fd0a2e6cafcb41cd0fb1c35 "metadata": {}, "outputs": [ { "data": { "text/plain": [ "6" ] }, <<<<<<< HEAD "execution_count": 7, ======= "execution_count": 5, >>>>>>> a7ec1b0bfb8e23b05fd0a2e6cafcb41cd0fb1c35 "metadata": {}, "output_type": "execute_result" } ], "source": [ "x=3\n", "y=3\n", "x+y" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" } }, "nbformat": 4, "nbformat_minor": 2 }""" ``` This is an example of broken notebook we defined in `tst_nb`. The json format is broken by the lines automatically added by git. Such a file can't be opened again in jupyter notebook, leaving the user with no other choice than to fix the text file manually. ``` print(tst_nb) ``` Note that in this example, the second conflict is easily solved: it just concerns the execution count of the second cell and can be solved by choosing either option without really impacting your notebook. This is the kind of conflicts `fix_conflicts` will (by default) fix automatically. The first conflict is more complicated as it spans across two cells and there is a cell present in one version, not the other. Such a conflict (and generally the ones where the inputs of the cells change form one version to the other) aren't automatically fixed, but `fix_conflicts` will return a proper json file where the annotations introduced by git will be placed in markdown cells. The first step to do this is to walk the raw text file to extract the cells. We can't read it as a JSON since it's broken, so we have to parse the text. ``` #export def extract_cells(raw_txt): "Manually extract cells in potential broken json `raw_txt`" lines = raw_txt.split('\n') cells = [] i = 0 while not lines[i].startswith(' "cells"'): i+=1 i += 1 start = '\n'.join(lines[:i]) while lines[i] != ' ],': while lines[i] != ' {': i+=1 j = i while not lines[j].startswith(' }'): j+=1 c = '\n'.join(lines[i:j+1]) if not c.endswith(','): c = c + ',' cells.append(c) i = j+1 end = '\n'.join(lines[i:]) return start,cells,end ``` This function returns the beginning of the text (before the cells are defined), the list of cells and the end of the text (after the cells are defined). 
``` start,cells,end = extract_cells(tst_nb) test_eq(len(cells), 3) test_eq(cells[0], """ { "cell_type": "code", <<<<<<< HEAD "execution_count": 6, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "3" ] }, "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ "z=3\n", "z" ] },""") #hide #Test the whole text is there #We add a , to the last cell (because we might add some after for merge conflicts at the end, so we need to remove it) test_eq(tst_nb, '\n'.join([start] + cells[:-1] + [cells[-1][:-1]] + [end])) ``` When walking the broken cells, we will add conflicts marker before and after the cells with conflicts as markdown cells. To do that we use this function. ``` #export def get_md_cell(txt): "A markdown cell with `txt`" return ''' { "cell_type": "markdown", "metadata": {}, "source": [ "''' + txt + '''" ] },''' tst = ''' { "cell_type": "markdown", "metadata": {}, "source": [ "A bit of markdown" ] },''' assert get_md_cell("A bit of markdown") == tst #export conflicts = '<<<<<<< ======= >>>>>>>'.split() #export def _split_cell(cell, cf, names): "Split `cell` between `conflicts` given state in `cf`, save `names` of branches if seen" res1,res2 = [],[] for line in cell.split('\n'): if line.startswith(conflicts[cf]): if names[cf//2] is None: names[cf//2] = line[8:] cf = (cf+1)%3 continue if cf<2: res1.append(line) if cf%2==0: res2.append(line) return '\n'.join(res1),'\n'.join(res2),cf,names #hide tst = '\n'.join(['a', f'{conflicts[0]} HEAD', 'b', conflicts[1], 'c', f'{conflicts[2]} lala', 'd']) v1,v2,cf,names = _split_cell(tst, 0, [None,None]) assert v1 == 'a\nb\nd' assert v2 == 'a\nc\nd' assert cf == 0 assert names == ['HEAD', 'lala'] #hide tst = '\n'.join(['a', f'{conflicts[0]} HEAD', 'b', conflicts[1], 'c', f'{conflicts[2]} lala', 'd', f'{conflicts[0]} HEAD', 'e']) v1,v2,cf,names = _split_cell(tst, 0, [None,None]) assert v1 == 'a\nb\nd\ne' assert v2 == 'a\nc\nd' assert cf == 1 assert names == ['HEAD', 'lala'] #hide tst = '\n'.join(['a', f'{conflicts[0]} HEAD', 'b', conflicts[1], 'c', f'{conflicts[2]} lala', 'd', f'{conflicts[0]} HEAD', 'e', conflicts[1]]) v1,v2,cf,names = _split_cell(tst, 0, [None,None]) assert v1 == 'a\nb\nd\ne' assert v2 == 'a\nc\nd' assert cf == 2 assert names == ['HEAD', 'lala'] #hide tst = '\n'.join(['b', conflicts[1], 'c', f'{conflicts[2]} lala', 'd']) v1,v2,cf,names = _split_cell(tst, 1, ['HEAD',None]) assert v1 == 'b\nd' assert v2 == 'c\nd' assert cf == 0 assert names == ['HEAD', 'lala'] #hide tst = '\n'.join(['c', f'{conflicts[2]} lala', 'd']) v1,v2,cf,names = _split_cell(tst, 2, ['HEAD',None]) assert v1 == 'd' assert v2 == 'c\nd' assert cf == 0 assert names == ['HEAD', 'lala'] #export _re_conflict = re.compile(r'^<<<<<<<', re.MULTILINE) #hide assert _re_conflict.search('a\nb\nc') is None assert _re_conflict.search('a\n<<<<<<<\nc') is not None #export def same_inputs(t1, t2): "Test if the cells described in `t1` and `t2` have the same inputs" if len(t1)==0 or len(t2)==0: return False try: c1,c2 = json.loads(t1[:-1]),json.loads(t2[:-1]) return c1['source']==c2['source'] except Exception as e: return False ts = [''' { "cell_type": "code", "source": [ "'''+code+'''" ] },''' for code in ["a=1", "b=1", "a=1"]] assert same_inputs(ts[0],ts[2]) assert not same_inputs(ts[0], ts[1]) #export def analyze_cell(cell, cf, names, prev=None, added=False, fast=True, trust_us=True): "Analyze and solve conflicts in `cell`" if cf==0 and _re_conflict.search(cell) is None: return cell,cf,names,prev,added old_cf = cf v1,v2,cf,names = 
_split_cell(cell, cf, names) if fast and same_inputs(v1,v2): if old_cf==0 and cf==0: return (v2 if trust_us else v1),cf,names,prev,added v1,v2 = (v2,v2) if trust_us else (v1,v1) res = [] if old_cf == 0: added=True res.append(get_md_cell(f'`{conflicts[0]} {names[0]}`')) res.append(v1) if cf ==0: res.append(get_md_cell(f'`{conflicts[1]}`')) if prev is not None: res += prev res.append(v2) res.append(get_md_cell(f'`{conflicts[2]} {names[1]}`')) prev = None else: prev = [v2] if prev is None else prev + [v2] return '\n'.join([r for r in res if len(r) > 0]),cf,names,prev,added ``` This is the main function used to walk through the cells of a notebook. `cell` is the cell we're at, `cf` the conflict state: `0` if we're not in any conflict, `1` if we are inside the first part of a conflict (between `<<<<<<<` and `=======`) and `2` for the second part of a conflict. `names` contains the names of the branches (they start at `[None,None]` and get updated as we pass along conflicts). `prev` contains a copy of what should be included at the start of the second version (if `cf=1` or `cf=2`). `added` starts at `False` and keeps track of whether we added any markdown cells (this flag allows us to know if a fast merge didn't leave any conflicts at the end). `fast` and `trust_us` are passed along by `fix_conflicts`: if `fast` is `True`, we don't point out conflict between cells if the inputs in the two versions are the same. Instead we merge using the local or remote branch, depending on `trust_us`. The function then returns the updated text (with one or several cells, depending on the conflicts to solve), the updated `cf`, `names`, `prev` and `added`. ``` tst = '\n'.join(['a', f'{conflicts[0]} HEAD', 'b', conflicts[1], 'c']) c,cf,names,prev,added = analyze_cell(tst, 0, [None,None], None, False,fast=False) test_eq(c, get_md_cell('`<<<<<<< HEAD`')+'\na\nb') test_eq(cf, 2) test_eq(names, ['HEAD', None]) test_eq(prev, ['a\nc']) test_eq(added, True) ``` Here in this example, we were entering cell `tst` with no conflict state. At the end of the cells, we are still in the second part of the conflict, hence `cf=2`. The result returns a marker for the branch head, then the whole cell in version 1 (a + b). We save a (prior to the conflict hence common to the two versions) and c (only in version 2) for the next cell in `prev` (that should contain the resolution of this conflict). ## Main function ``` #export def fix_conflicts(fname, fast=True, trust_us=True): "Fix broken notebook in `fname`" fname=Path(fname) shutil.copy(fname, fname.with_suffix('.ipynb.bak')) with open(fname, 'r') as f: raw_text = f.read() start,cells,end = extract_cells(raw_text) res = [start] cf,names,prev,added = 0,[None,None],None,False for cell in cells: c,cf,names,prev,added = analyze_cell(cell, cf, names, prev, added, fast=fast, trust_us=trust_us) res.append(c) if res[-1].endswith(','): res[-1] = res[-1][:-1] with open(f'{fname}', 'w') as f: f.write('\n'.join([r for r in res+[end] if len(r) > 0])) if fast and not added: print("Succesfully merged conflicts!") else: print("One or more conflict remains in the notebook, please inspect manually.") ``` The function will begin by backing the notebook `fname` to `fname.bak` in case something goes wrong. Then it parses the broken json, solving conflicts in cells. If `fast=True`, every conflict that only involves metadata or outputs of cells will be solved automatically by using the local (`trust_us=True`) or the remote (`trust_us=False`) branch. 
Otherwise, or for conflicts involving the inputs of cells, the json will be repaired by including the two versions of the conflicted cell(s) with markdown cells indicating the conflicts. You will be able to open the notebook again and search for the conflicts (look for `<<<<<<<`), then fix them as you wish. If `fast=True`, the function will print a message indicating whether the notebook was fully merged or if conflicts remain. ## Export - ``` #hide from nbdev.export import notebook2script notebook2script() ```
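For reference, here is a minimal usage sketch of the function defined above (the notebook filename is hypothetical):

```
# repair a notebook left broken by a git merge (hypothetical filename)
fix_conflicts('00_core.ipynb', fast=True, trust_us=True)
```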
<a href="https://colab.research.google.com/github/Ivan-Nebogatikov/HumanActivityRecognitionOutliersDetection/blob/main/Processing.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> Скачиваем данные, преобразуем их в одну таблицу ``` import numpy as np import pandas as pd import json from datetime import datetime from datetime import date from math import sqrt from zipfile import ZipFile from os import listdir from os.path import isfile, join filesDir = "/content/drive/MyDrive/training_data" csvFiles = [join(filesDir, f) for f in listdir(filesDir) if (isfile(join(filesDir, f)) and 'csv' in f)] data = pd.DataFrame() for file in csvFiles: if 'acc' in file: with ZipFile(file, 'r') as zipObj: listOfFileNames = zipObj.namelist() for fileName in listOfFileNames: if 'chest' in fileName: with zipObj.open(fileName) as csvFile: newData = pd.read_csv(csvFile) newData['type'] = str(csvFile.name).replace('_',' ').replace('.',' ').split()[1] data = data.append(newData) # newData = pd.read_csv(csvFile) # newColumns = [col for col in newData.columns if col not in data.columns] # print(newColumns) # if data.empty or not newColumns: # newData['type'] = str(csvFile.name).replace('_',' ').replace('.',' ').split()[1] # data = data.append(newData) # else: # for index, newRow in newData.iterrows(): # print(newRow['attr_time']) # print(data.iloc[[0]]['attr_time']) # print(len(data[data['attr_time'] < newRow['attr_time']])) # existingRow = data[data['attr_time'] <= newRow['attr_time']].iloc[-1] # existingRow[newColumns] = newRow[newColumns] # data = data.sort_values(by=['attr_time']) #print(data) data = data.sort_values(by=['attr_time']) print(data) # heart = pd.read_csv('https://raw.githubusercontent.com/Ivan-Nebogatikov/HumanActivityRecognition/master/datasets/2282_3888_bundle_archive/heart.csv') # heart['timestamp'] = heart['timestamp'].map(lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S.%f")) # heart = heart.sort_values(by='timestamp') # def getHeart(x): # dt = datetime.strptime(x, "%Y-%m-%d %H:%M:%S.%f") # f = heart[heart['timestamp'] < dt] # lastValue = f.iloc[[-1]]['values'].tolist()[0] # intValue = list(json.loads(lastValue.replace('\'', '"')))[0] # return intValue # acc = pd.read_csv('https://raw.githubusercontent.com/Ivan-Nebogatikov/HumanActivityRecognition/master/datasets/2282_3888_bundle_archive/acc.csv') # acc['heart'] = acc['timestamp'].map(lambda x: getHeart(x)) # print(acc) # def change(x): # if x == 'Pause' or x == 'Movie': # x = 'Watching TV' # if x == 'Shop': # x = 'Walk' # if x == 'Football': # x = 'Running' # if x == 'Meeting' or x == 'Work' or x == 'Picnic ' or x == 'In vehicle' or x == 'In bus' : # x = 'Sitting' # if x == 'On bus stop': # x = 'Walk' # if x == 'Walking&party' or x == 'Shopping& wearing' or x == 'At home': # x = 'Walk' # return x # acc['act'] = acc['act'].map(lambda x: change(x)) # labels = np.array(acc['act']) # arrays = acc['values'].map(lambda x: getValue(x)) # x = getDiff(list(arrays.map(lambda x: np.double(x[0])))) # y = getDiff(list(arrays.map(lambda x: np.double(x[1])))) # z = getDiff(list(arrays.map(lambda x: np.double(x[2])))) # dist = list(map(lambda a, b, c: sqrt(a*a+b*b+c*c), x, y, z)) labels = np.array(data['type']) ``` ``` data['time_diff'] = data['attr_time'].diff() indMin = int(data[['time_diff']].idxmin()) print(indMin) t_j = data.iloc[indMin]['attr_time'] print(t_j) t_j1 = data.iloc[indMin+1]['attr_time'] diff = t_j1 - t_j print(diff) # interpolated = [] data['attr_x_i'] = data.apply(lambda 
row: (t_j1 - row['attr_time']) * row['attr_x'] / diff + (row['attr_time'] - t_j) * row['attr_x'] / diff, axis=1) # !!! this still needs the next sample's value (a +1 row offset); see the interpolation sketch further below data['attr_y_i'] = data.apply(lambda row: (t_j1 - row['attr_time']) * row['attr_y'] / diff + (row['attr_time'] - t_j) * row['attr_y'] / diff, axis=1) data['attr_z_i'] = data.apply(lambda row: (t_j1 - row['attr_time']) * row['attr_z'] / diff + (row['attr_time'] - t_j) * row['attr_z'] / diff, axis=1) # # for i, row in data.iterrows(): # # t_i = row['attr_time'] # # def axis(value): (t_j1 - t_i) * value / (t_j1 - t_j) + (t_i + t_j) * value / (t_j1 + t_j) # # interpolated.append([row["id"], row['attr_time'], axis(row['attr_x']), axis(row['attr_y']), axis(row['attr_z']), row['type'], row['time_diff']]) print(data) data['g_x'] = data['attr_x_i'].rolling(window=5).mean() data['g_y'] = data['attr_y_i'].rolling(window=5).mean() data['g_z'] = data['attr_z_i'].rolling(window=5).mean() print(data['g_x']) import numpy as np def acc(a, g): return np.cross(np.cross(a, g) / np.dot(g, g), g) data['a_tv'] = data.apply(lambda row: acc([row.attr_x_i, row.attr_y_i, row.attr_z_i], [row.g_x, row.g_y, row.g_z]), axis=1) data['a_th'] = data.apply(lambda row: [row.attr_x_i - row.a_tv[0], row.attr_y_i - row.a_tv[1], row.attr_z_i - row.a_tv[2]], axis=1) print(data['a_tv']) print(data['a_th']) ``` Helper function for printing the results ``` import pandas as pd import numpy as np from scipy import interp from sklearn.metrics import accuracy_score from sklearn.metrics import precision_recall_fscore_support from sklearn.metrics import roc_curve, auc from sklearn.preprocessing import LabelBinarizer def class_report(y_true, y_pred, y_score=None, average='micro'): if y_true.shape != y_pred.shape: print("Error!
y_true %s is not the same shape as y_pred %s" % ( y_true.shape, y_pred.shape) ) return accuracy = accuracy_score(y_true, y_pred) print("Accuracy:", accuracy) lb = LabelBinarizer() if len(y_true.shape) == 1: lb.fit(y_true) #Value counts of predictions labels, cnt = np.unique( y_pred, return_counts=True) n_classes = 5 pred_cnt = pd.Series(cnt, index=labels) metrics_summary = precision_recall_fscore_support( y_true=y_true, y_pred=y_pred, labels=labels) avg = list(precision_recall_fscore_support( y_true=y_true, y_pred=y_pred, average='weighted')) metrics_sum_index = ['precision', 'recall', 'f1-score', 'support'] class_report_df = pd.DataFrame( list(metrics_summary), index=metrics_sum_index, columns=labels) support = class_report_df.loc['support'] total = support.sum() class_report_df['avg / total'] = avg[:-1] + [total] class_report_df = class_report_df.T class_report_df['pred'] = pred_cnt class_report_df['pred'].iloc[-1] = total if not (y_score is None): fpr = dict() tpr = dict() roc_auc = dict() for label_it, label in enumerate(labels): fpr[label], tpr[label], _ = roc_curve( (y_true == label).astype(int), y_score[:, label_it]) roc_auc[label] = auc(fpr[label], tpr[label]) if average == 'micro': if n_classes <= 2: fpr["avg / total"], tpr["avg / total"], _ = roc_curve( lb.transform(y_true).ravel(), y_score[:, 1].ravel()) else: fpr["avg / total"], tpr["avg / total"], _ = roc_curve( lb.transform(y_true).ravel(), y_score.ravel()) roc_auc["avg / total"] = auc( fpr["avg / total"], tpr["avg / total"]) elif average == 'macro': # First aggregate all false positive rates all_fpr = np.unique(np.concatenate([ fpr[i] for i in labels] )) # Then interpolate all ROC curves at this points mean_tpr = np.zeros_like(all_fpr) for i in labels: mean_tpr += interp(all_fpr, fpr[i], tpr[i]) # Finally average it and compute AUC mean_tpr /= n_classes fpr["macro"] = all_fpr tpr["macro"] = mean_tpr roc_auc["avg / total"] = auc(fpr["macro"], tpr["macro"]) class_report_df['AUC'] = pd.Series(roc_auc) print(class_report_df) return accuracy ``` Определяем функции для предсказания с использованием классификатора и с использованием нескольких классификаторов ``` from sklearn.metrics import accuracy_score from sklearn.metrics import classification_report from sklearn.metrics import roc_auc_score from sklearn import metrics from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import train_test_split from sklearn.neighbors import KNeighborsClassifier from sklearn.neural_network import MLPClassifier import pandas as pd from sklearn.model_selection import cross_val_score from sklearn.metrics import plot_confusion_matrix import matplotlib.pyplot as plt from sklearn.utils import shuffle def Predict(x, classifier = RandomForestClassifier(n_estimators = 400, random_state = 3, class_weight='balanced')): train_features, test_features, train_labels, test_labels = train_test_split(x, labels, test_size = 0.15, random_state = 242) print('Training Features Shape:', train_features.shape) print('Testing Features Shape:', test_features.shape) print("\n") classifier.fit(train_features, train_labels); x_shuffled, labels_shuffled = shuffle(np.array(x), np.array(labels)) scores = cross_val_score(classifier, x_shuffled, labels_shuffled, cv=7) print("%f accuracy with a standard deviation of %f" % (scores.mean(), scores.std())) predictions = list(classifier.predict(test_features)) pred_prob = classifier.predict_proba(test_features) accuracy = class_report( y_true=test_labels, y_pred=np.asarray(predictions), y_score=pred_prob, 
average='micro') if hasattr(classifier, 'feature_importances_'): print(classifier.feature_importances_) plot_confusion_matrix(classifier, test_features, test_labels) plt.xticks(rotation = 90) plt.style.library['seaborn-darkgrid'] plt.show() return [accuracy, scores.mean(), scores.std()] def PredictWithClassifiers(data, classifiers): accuracies = {} for name, value in classifiers.items(): accuracy = Predict(data, value) accuracies[name] = accuracy print("\n") df = pd.DataFrame({(k, v[0], v[1], v[2]) for k, v in accuracies.items()}, columns=["Method", "Accuracy", "Mean", "Std"]) print(df) ``` Define the set of classifiers to use ``` from sklearn import svm from sklearn.naive_bayes import GaussianNB from sklearn.gaussian_process import GaussianProcessClassifier from sklearn.gaussian_process.kernels import RBF from sklearn.ensemble import AdaBoostClassifier methods = { "MLP" : MLPClassifier(random_state=1, max_iter=300), "K-neigh" : KNeighborsClassifier(), # default k = 5 "Random Forest" : RandomForestClassifier(n_estimators = 400, random_state = 3, class_weight='balanced'), "Bayes" : GaussianNB(), "AdaBoost" : AdaBoostClassifier(), "SVM" : svm.SVC(probability=True, class_weight='balanced') } frame = pd.DataFrame(data['a_th'].to_list(), columns=['x','y','z']).fillna(0) print(frame) feature_list = list(frame.columns) print(frame) PredictWithClassifiers(frame, methods) ```
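The per-row interpolation formula used earlier in this notebook algebraically reduces back to the original `attr_x` value, which is what the "+1 row" note flags: a true interpolation needs the neighbouring sample as well. Below is a minimal sketch of one way to do this with pandas, resampling onto a uniform grid and interpolating in time. It assumes `attr_time` is a Unix timestamp in milliseconds, and `interpolate_uniform` is an illustrative helper, not part of the original code.

```
import pandas as pd

def interpolate_uniform(df, freq_ms=20):
    # Index the samples by their timestamp, put them on a fixed grid,
    # and fill the gaps by time-weighted linear interpolation.
    out = df.copy()
    out.index = pd.to_datetime(out['attr_time'], unit='ms')
    cols = ['attr_x', 'attr_y', 'attr_z']
    return (out[cols]
            .resample(f'{freq_ms}ms').mean()
            .interpolate(method='time'))

# usage: uniform = interpolate_uniform(data)
```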
# SF Salaries Exercise - Solutions Welcome to a quick exercise for you to practice your pandas skills! We will be using the [SF Salaries Dataset](https://www.kaggle.com/kaggle/sf-salaries) from Kaggle! Just follow along and complete the tasks outlined in bold below. The tasks will get harder and harder as you go along. ** Import pandas as pd.** ``` import pandas as pd ``` ** Read Salaries.csv as a dataframe called sal.** ``` sal = pd.read_csv('Salaries.csv') ``` ** Check the head of the DataFrame. ** ``` sal.head() ``` ** Use the .info() method to find out how many entries there are.** ``` sal.info() # 148654 Entries ``` **What is the average BasePay ?** ``` sal['BasePay'].mean() ``` ** What is the highest amount of OvertimePay in the dataset ? ** ``` sal['OvertimePay'].max() ``` ** What is the job title of JOSEPH DRISCOLL ? Note: Use all caps, otherwise you may get an answer that doesn't match up (there is also a lowercase Joseph Driscoll). ** ``` sal[sal['EmployeeName']=='JOSEPH DRISCOLL']['JobTitle'] ``` ** How much does JOSEPH DRISCOLL make (including benefits)? ** ``` sal[sal['EmployeeName']=='JOSEPH DRISCOLL']['TotalPayBenefits'] ``` ** What is the name of highest paid person (including benefits)?** ``` sal[sal['TotalPayBenefits']== sal['TotalPayBenefits'].max()] #['EmployeeName'] # or # sal.loc[sal['TotalPayBenefits'].idxmax()] ``` ** What is the name of lowest paid person (including benefits)? Do you notice something strange about how much he or she is paid?** ``` sal[sal['TotalPayBenefits']== sal['TotalPayBenefits'].min()] #['EmployeeName'] # or # sal.loc[sal['TotalPayBenefits'].idxmax()]['EmployeeName'] ## ITS NEGATIVE!! VERY STRANGE sal.groupby('Year')['BasePay'].mean() ``` ** What was the average (mean) BasePay of all employees per year? (2011-2014) ? ** ``` sal.groupby('Year').mean()['BasePay'] ``` ** How many unique job titles are there? ** ``` sal['JobTitle'].nunique() sal.head(0) # Get the col headers only sal['JobTitle'].value_counts().head() ``` ** What are the top 5 most common jobs? ** ``` sal['JobTitle'].value_counts().head(5) # sum(sal[sal['Year']==2013]['JobTitle'].value_counts() == 1) # sal[sal['Year']==2013sal['Year']==2013 yr_cond = sal['Year']==2013 # Return a series of all rows in sal and whether they have 2013 in their 'Year' column yr_filtered_sal = sal[yr_cond] # Return sal filtered to only the rows that meet above criteria job_counts = yr_filtered_sal['JobTitle'].value_counts() job_counts_is_one = (job_counts == 1) sum(job_counts_is_one) ``` ** How many Job Titles were represented by only one person in 2013? (e.g. Job Titles with only one occurence in 2013?) ** ``` sum(sal[sal['Year']==2013]['JobTitle'].value_counts() == 1) # pretty tricky way to do this... sal def chief_check(title): if 'chief' in title.lower(): return True else: return False sum(sal['JobTitle'].apply(lambda title: chief_check(title))) ``` ** How many people have the word Chief in their job title? (This is pretty tricky) ** ``` def chief_string(title): if 'chief' in title.lower(): return True else: return False sal['Title_len'] = sal['JobTitle'].apply(len) sal[['Title_len','TotalPayBenefits']].corr() sum(sal['JobTitle'].apply(lambda x: chief_string(x))) ``` ** Bonus: Is there a correlation between length of the Job Title string and Salary? ** ``` sal['title_len'] = sal['JobTitle'].apply(len) sal[['title_len','TotalPayBenefits']].corr() # No correlation. ``` # Great Job!
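A possible vectorized alternative to the `chief_string`/`apply` approach and the title-length correlation above — a short sketch that should give the same results, assuming the `sal` DataFrame loaded earlier:

```
# Case-insensitive substring match, then sum the boolean mask.
print(sal['JobTitle'].str.contains('chief', case=False, na=False).sum())

# Title-length vs. pay correlation without adding a helper column first.
print(sal['JobTitle'].str.len().corr(sal['TotalPayBenefits']))
```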
$\newcommand{\xv}{\mathbf{x}} \newcommand{\wv}{\mathbf{w}} \newcommand{\yv}{\mathbf{y}} \newcommand{\zv}{\mathbf{z}} \newcommand{\uv}{\mathbf{u}} \newcommand{\vv}{\mathbf{v}} \newcommand{\Chi}{\mathcal{X}} \newcommand{\R}{\rm I\!R} \newcommand{\sign}{\text{sign}} \newcommand{\Tm}{\mathbf{T}} \newcommand{\Xm}{\mathbf{X}} \newcommand{\Zm}{\mathbf{Z}} \newcommand{\Im}{\mathbf{I}} \newcommand{\Um}{\mathbf{U}} \newcommand{\Vm}{\mathbf{V}} \newcommand{\muv}{\boldsymbol\mu} \newcommand{\Sigmav}{\boldsymbol\Sigma} \newcommand{\Lambdav}{\boldsymbol\Lambda} $ # Machine Learning Methodology For machine learning algorithms, we learned how to set goas to optimize and how to reach or approach the optimal solutions. Now, let us discuss how to evaluate the learned models. There will be many different aspects that we need to consider not simply accuracy, so we will further discuss techniques to make the machine learning models better. ### Performance Measurement, Overfitting, Regularization, and Cross-Validation In machine learning, *what is a good measure to assess the quality of a machine learning model?* Let us step back from what we have learned in class about ML techniques and think about this. In previous lectures, we have discussed various measures such a `root mean square error (RMSE)`, `mean square error (MSE)`, `mean absolute error (MAE)` for **regression problems**, and `accuracy`, `confusion matrix`, `precision/recall`, `F1-score`, `receiver operating characteristic (ROC) curve`, and others for **classification**. For your references, here are the list of references for diverse metrics for different categories of machine learning. * Regressions: https://arxiv.org/pdf/1809.03006.pdf * Classification: As we have a cheatsheet already, here is a comprehensive version from ICMLA tutorial. https://www.icmla-conference.org/icmla11/PE_Tutorial.pdf * Clustering: https://scikit-learn.org/stable/modules/clustering.html#clustering-performance-evaluation Anyway, are these measures good enough to say a specific model is better than the other? Let us take a look at following codes examples and think about something that we are missing. ``` import numpy as np import matplotlib.pyplot as plt %matplotlib inline from copy import deepcopy as copy x = np.arange(3) t = copy(x) def plot_data(): plt.plot(x, t, "o", markersize=10) plot_data() ``` I know that it is silly to apply a linear regression on this obvious model, but let us try. :) ``` ## Least Square solution: Filled codes here to fit and plot as the instructor's output import numpy as np # First creast X1 by adding 1's column to X N = x.shape[0] X1 = np.c_[np.ones((N, 1)), x] # Next, using inverse, solve, lstsq function to get w* w = np.linalg.inv(X1.transpose().dot(X1)).dot(X1.transpose()).dot(t) # print(w) y = X1.dot(w) plot_data() plt.plot(y) ``` Can we try a nonlinear model on this data? Why not? We can make a nonlinear model by simply adding higher degree terms such square, cubic, quartic, and so on. $$ f(\xv; \wv) = w_0 + w_1 \xv + w_2 \xv^2 + w_3 \xv^3 + \cdots $$ This is called *polynomial regression* as we transform the input features to nonlinear by extending the features high dimensional with higher polynomial degree terms. For instance, your input feature $(1, x)$ is extended to $(1, x, x^2, x^3)$ for cubic polynomial regression model. After input transformation, you can simply use least squares or least mean squares to find the weights as the model is still linear with respect to the weight $\wv$. 
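Before the fuller `poly_regress` helper below, here is a minimal sketch of just this input transformation and a least-squares fit, using `np.vander` for brevity (the helper that follows builds the same matrix by hand):

```
import numpy as np

x = np.arange(3)
t = x.copy()

# Expand (1, x) into (1, x, x^2, x^3), then solve the ordinary least-squares problem.
X_cubic = np.vander(x, 4, increasing=True)
w, *_ = np.linalg.lstsq(X_cubic, t, rcond=None)
print(X_cubic)
print(w)
```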
Let us make the polynomial regression model and fit to the data above with lease squares. ``` # Polinomial regression def poly_regress(x, d=3, t=None, **params): bnorm = params.pop('normalize', False) X_poly = [] ####################################################################################### # Transform input features: append polynomial terms (from bias when i=0) to degree d for i in range(d+1): X_poly.append(x**i) X_poly = np.vstack(X_poly).T # normalize if bnorm: mu, sd = np.mean(X_poly[:, 1:, None], axis=0), np.std(X_poly[:, 1:, None], axis=0) X_poly[:, 1:] = (X_poly[:, 1:] - mu.flat) / sd.flat # least sqaures if t is not None: # added least square solution here w = np.linalg.inv(X_poly.transpose().dot(X_poly)).dot(X_poly.transpose()).dot(t) if bnorm: return X_poly, mu, sd, w return X_poly, w if bnorm: return X_poly, mu, sd return X_poly ``` The poly_regress() function trains with the data when target input is given after transform the input x as the following example. The function also returns the transformed input X_poly. ``` Xp, wp = poly_regress(x, 3, t) print(wp.shape) print(Xp.shape) yp = Xp @ wp plot_data() plt.plot(x, y) plt.plot(x, yp) ``` Hmm... They both look good on this. Then, what is the difference? Let us take a look at how they change if I add the test data. If I compare the MSE, they are equivalent. Try to expand the data for test and see how different they are. Here, we use another usage of poly_regress() function without passing target, so we transform the target input to polynomial features. ``` xtest = np.arange(11)-5 Xptest = poly_regress(xtest, 3) yptest = Xptest @ wp X1test = np.vstack((np.ones(len(xtest)), xtest)).T ytest = X1test @ w plot_data() plt.plot(xtest, ytest) plt.plot(xtest, yptest) ``` Here the orange is th linear model and the green line is 3rd degree polynomial regression. Which model looks better? What is your pick? <br/><br/><br/><br/><br/><br/> ## Learning Curve From the above example, we realized that the model evaluation we discussed so far is not enough. First, let us consider how well a learned model generalizes to new data with respect to the number of training samples. We assume that the test data are drawn from same distribution over example space as training data. In this plot, we can compare the two learning algorithms and find which one generalizes better than the other. Also, during the training, we can access to the training error (or empirical loss). This may not look similar (mostly not) to the test error (generalization loss above). Let us take a look at the example in the Geron textbook. 
``` import os import pandas as pd import sklearn def prepare_country_stats(oecd_bli, gdp_per_capita): oecd_bli = oecd_bli[oecd_bli["INEQUALITY"]=="TOT"] oecd_bli = oecd_bli.pivot(index="Country", columns="Indicator", values="Value") gdp_per_capita.rename(columns={"2015": "GDP per capita"}, inplace=True) gdp_per_capita.set_index("Country", inplace=True) full_country_stats = pd.merge(left=oecd_bli, right=gdp_per_capita, left_index=True, right_index=True) full_country_stats.sort_values(by="GDP per capita", inplace=True) remove_indices = [0, 1, 6, 8, 33, 34, 35] keep_indices = list(set(range(36)) - set(remove_indices)) return full_country_stats[["GDP per capita", 'Life satisfaction']].iloc[keep_indices], full_country_stats[["GDP per capita", 'Life satisfaction']] !curl https://raw.githubusercontent.com/ageron/handson-ml/master/datasets/lifesat/oecd_bli_2015.csv > oecd_bli_2015.csv !curl https://raw.githubusercontent.com/ageron/handson-ml/master/datasets/lifesat/gdp_per_capita.csv > gdp_per_capita.csv # Load the data oecd_bli = pd.read_csv("oecd_bli_2015.csv", thousands=',') gdp_per_capita = pd.read_csv("gdp_per_capita.csv",thousands=',',delimiter='\t', encoding='latin1', na_values="n/a") # Prepare the data country_stats, full_country_stats = prepare_country_stats(oecd_bli, gdp_per_capita) X = np.c_[country_stats["GDP per capita"]] y = np.c_[country_stats["Life satisfaction"]] # Visualize the data country_stats.plot(kind='scatter', x="GDP per capita", y='Life satisfaction', figsize=(6.5,4)) plt.axis([0, 60000, 0, 10]) plt.show() ``` This looks like the data showing a linear trend. Now, let us extend the x-axis to further to 110K and see how it looks. ``` # Visualize the full data # Visualize the data full_country_stats.plot(kind='scatter', x="GDP per capita", y='Life satisfaction', figsize=(12,4)) plt.axis([0, 110000, 0, 10]) plt.show() ``` Maybe a few outliers with high GDP do not follow the linear trend that we observed above. ``` # Data for training Xfull = np.c_[full_country_stats["GDP per capita"]] yfull = np.c_[full_country_stats["Life satisfaction"]] print(Xfull.shape, yfull.shape) ``` We can better observe the trend by fitting polynomial regression models by changing the degrees. ``` # polynomial model to this data for deg in [1, 2, 5, 10, 30]: plt.figure(); full_country_stats.plot(kind='scatter', x="GDP per capita", y='Life satisfaction', figsize=(12,4)); plt.axis([0, 110000, 3, 10]); Xp, mu, sd, wp = poly_regress(Xfull.flatten(), deg, yfull.flatten(), normalize=True) yp1 = Xp @ wp # plot curve plt.plot(Xfull, yp1, 'r-', label=deg); plt.title("degree: {}".format(deg)); ``` What degree do you think the data follow? What is your best pick? Do you see overfitting here? From which one do you see it? As the complexity of model grows, you may have small training errors. However, there is no guarantee that you have a good generalization (you may have very bad generalization error!). This is called **Overfitting** problem in machine learning. From training data, once you learned the hypothesis *h* (or machine learning model), you can have training error $E_{train}(h)$ and testing error $E_{test}(h)$. Let us say that there is another model $h^\prime$ for which $$ E_{train}(h) < E_{train}(h^\prime) \wedge E_{test}(h) > E_{test}(h^\prime).$$ Then, we say the hypothesis $h$ is "overfitted." ## Bias-Variance Tradeoff Here the bias refers an error from erroneous assumptions and the variance means an error from sensitivity to small variation in the data. 
Thus, high bias can cause an underfitted model and high variance can cause an overfitted model. Finding the sweet spot that have good generalization is on our hand. In the same track of discussion, Scott summarizes the errors that we need to consider as follows: - high bias error: under-performing model that misses the important trends - high variance error: excessively sensitive to small variations in the training data - Irreducible error: genuine to the noise in the data. Need to clean up the data ![](http://webpages.uncc.edu/mlee173/teach/itcs4156online/images/class/bias-and-variance.jpg) <center>From Understanding the Bias-Variance Tradeoff, by Scott Fortmann-Roe</center> ## Regularization We reduce overfitting by addding a complexity penalty to the loss function. Here follows the loss function for the linear regression with $L2$-norm. $$ \begin{align*} E(\wv) &= \sum_i^N ( y_i - t_i)^2 + \lambda \lVert \wv \rVert_2^2 \\ \\ &= \sum_i^N ( y_i - t_i)^2 + \lambda \sum_k^D w_k^2 \\ \\ &= (\Xm \wv - T)^\top (\Xm \wv - T) + \lambda \wv^\top \wv \\ \\ &= \wv^\top \Xm^\top \Xm \wv - 2 \Tm^\top \Xm \wv + \Tm^\top \Tm + \lambda \wv^\top \wv \end{align*} $$ Repeating the derivation as in linear regression, $$ \begin{align*} \frac{\partial E(\wv)}{\partial \wv} &= \frac{\partial (\Xm \wv - \Tm)^\top (\Xm \wv - \Tm)}{\partial \wv} + \frac{\partial \lambda \wv^\top \wv}{\partial \wv} \\ \\ &= 2 \Xm^\top \Xm \wv - 2 \Xm^\top \Tm + 2 \lambda \wv \end{align*} $$ Setting the last term zero, we reach the solution of *ridge regression*: $$ \begin{align*} 2 \Xm^\top \Xm \wv - 2 \Xm^\top \Tm + 2 \lambda \wv &= 0\\ \\ \big(\Xm^\top \Xm + \lambda \Im \big) \wv &= \Xm^\top \Tm\\ \\ \wv &= \big(\Xm^\top \Xm + \lambda \Im \big)^{-1} \Xm^\top \Tm. \end{align*} $$ ## Cross-Validation Now, let us select a model. Even with the regularization, we still need to pick $\lambda$. For polynomial regression, we need to find the degree parameter. When we are mix-using multiple algorithms, we still need to know which model to choose. Here, remembe that we want a model that have good generalization. The idea is preparing one dataset (a validation set) by pretending that we cannot see the labels. After choosing a model parameter (or a model) and train it with training dataset, we test it on the validation data. Comparing the validation error, we select the one that has the lowest validation error. Finally, we evaluate the model on testing data. Here follows the K-fold cross-validation that divides the data into K blocks for traing, validating and testing. ### K-fold CV Procedure ![image.png](attachment:image.png) ## Feature Selection Another way to get a sparse (possibly good generalization) model is using small set of most relevant features. Weight analysis or some other tools can give us what is most relevant or irrelevant features w.r.t the training error. But it is still hard to tell the relevance to the generalization error. Thus, problems in choosing a minimally relevant set of features is NP-hard even with perfect estimation of generalization error. With inaccurate estimation that we have, it is much hard to find them. Thus, we can simply use cross-validation to find features. We can greedily add (forward selection) or delete (backward selection) features that decrease cross-validation error most. # Practice Now, try to write your own 5-fold cross validation code that follows the procedure above for ridge regression. Try 5 different $\lambda$ values, [0, 0.01, 0.1, 1, 10], for this. 
``` # TODO: try to implement your own K-fold CV. # (This will be part of the next assignment; no solution will be provided.) ```
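For reference while doing the exercise, here is a minimal sketch of only the closed-form ridge solution derived above, $\wv = (\Xm^\top \Xm + \lambda \Im)^{-1} \Xm^\top \Tm$ — not the 5-fold CV loop, which is left for the assignment. `ridge_fit` is an illustrative name, and for simplicity it regularizes every weight, including the bias, which is often excluded in practice.

```
import numpy as np

def ridge_fit(X, t, lam):
    # w = (X^T X + lambda I)^{-1} X^T t, computed with a linear solve
    # rather than an explicit inverse.
    A = X.T @ X + lam * np.eye(X.shape[1])
    return np.linalg.solve(A, X.T @ t)

# toy usage with the tiny dataset from the start of this notebook
x = np.arange(3)
t = x.copy()
X1 = np.c_[np.ones(len(x)), x]
for lam in [0, 0.01, 0.1, 1, 10]:
    print(lam, ridge_fit(X1, t, lam))
```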
# GOOGLE PLAYSTORE ANALYSIS The dataset used in this analysis is taken from [kaggle datasets](https://www.kaggle.com/datasets) In this analysis we took a raw data which is in csv format and then converted it into a dataframe.Performed some operations, cleaning of the data and finally visualizing some necessary conclusions obtained from it. Let's import necessary libraries required for the analysis ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns ``` Convert the csv file into dataframe using pandas ``` df=pd.read_csv('googleplaystore.csv') df.head(5) ``` This is the data we obtained from the csv file.Let's see some info about this dataframe ``` df.info() ``` This dataframe consists of 10841 entries ie information about 10841 apps. It tells about the category to which the app belongs,rating given by the users,size of the app,number of reviews given,count of number of installs and some other information # DATA CLEANING Some columns have in-appropriate data,data types.This columns needed to be cleaned to perform the analysis. ##### SIZE : This column has in-appropriate data type.This needed to be converted into numeric type after converting every value into MB's For example, the size of the app is in “string” format. We need to convert it into a numeric value. If the size is “10M”, then ‘M’ was removed to get the numeric value of ‘10’. If the size is “512k”, which depicts app size in kilobytes, the first ‘k’ should be removed and the size should be converted to an equivalent of ‘megabytes’. ``` df['Size'] = df['Size'].map(lambda x: x.rstrip('M')) df['Size'] = df['Size'].map(lambda x: str(round((float(x.rstrip('k'))/1024), 1)) if x[-1]=='k' else x) df['Size'] = df['Size'].map(lambda x: np.nan if x.startswith('Varies') else x) ``` 10472 has in-appropriate data in every column, may due to entry mistake.So we are removing that entry from the table ``` df.drop(10472,inplace=True) ``` By using pd.to_numeric command we are converting into numeric type ``` df['Size']=df['Size'].apply(pd.to_numeric) ``` ##### Installs : The value of installs is in “string” format. It contains numeric values with commas. It should be removed. And also, the ‘+’ sign should be removed from the end of each string. ``` df['Installs'] = df['Installs'].map(lambda x: x.rstrip('+')) df['Installs'] = df['Installs'].map(lambda x: ''.join(x.split(','))) ``` By using pd.to_numeric command we are converting it into numeric data type ``` df['Installs']=df['Installs'].apply(pd.to_numeric) ``` ##### Reviews : The reviews column is in string format and we need to convert it into numeric type ``` df['Reviews']=df['Reviews'].apply(pd.to_numeric) ``` After cleaning some columns and rows we obtained the required format to perform the analysis ``` df.head(5) ``` # DATA VISUALIZATION In this we are taking a parameter as reference and checking the trend of another parameter like whether there is a rise or fall,which category are more,what kinds are of more intrest and so on. 
###### Basic pie chart to view distribution of apps across various categories ``` fig, ax = plt.subplots(figsize=(10, 10), subplot_kw=dict(aspect="equal")) number_of_apps = df["Category"].value_counts() labels = number_of_apps.index sizes = number_of_apps.values ax.pie(sizes,labeldistance=2,autopct='%1.1f%%') ax.legend(labels=labels,loc="right",bbox_to_anchor=(0.9, 0, 0.5, 1)) ax.axis("equal") plt.show() ``` ## App count for certain range of Ratings In this we are finding the count of apps for each range from 0 to 5 ie how many apps have more rating,how many are less rated. ``` bins=pd.cut(df['Rating'],[0.0,1.0,2.0,3.0,4.0,5.0]) rating_df=pd.DataFrame(df.groupby(bins)['App'].count()) rating_df.reset_index(inplace=True) rating_df plt.figure(figsize=(12, 6)) axis=sns.barplot('Rating','App',data=rating_df); axis.set(ylabel= "App count",title='APP COUNT STATISTICS ACCORDING TO RATING'); ``` We can see that most of the apps are with rating 4 and above and very less apps have rating below 2. ## Top5 Apps with highest review count In this we are retrieving the top5 apps with more number of reviews and seeing it visually how their review count is changing. ``` reviews_df=df.sort_values('Reviews').tail(15).drop_duplicates(subset='App')[['App','Reviews','Rating']] reviews_df plt.figure(figsize=(12, 6)) axis=sns.lineplot(x="App",y="Reviews",data=reviews_df) axis.set(title="Top 5 most Reviewed Apps"); sns.set_style('darkgrid') ``` Facebook has more reviews compared to other apps in the playstore ## Which content type Apps are more in playstore In this we are grouping the apps according to their content type and visually observing the result ``` content_df=pd.DataFrame(df.groupby('Content Rating')['App'].count()) content_df.reset_index(inplace=True) content_df plt.figure(figsize=(12, 6)) plt.bar(content_df['Content Rating'],content_df['App']); plt.xlabel('Content Rating') plt.ylabel('App count') plt.title('App count for different Contents'); ``` Most of the apps in playstore can be used by everyone irrespective of the age.Only 3 apps are A rated ##### --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- ## Free vs Paid Apps Let's see variations considering type of App ie paid and free apps ``` Type_df=df.groupby('Type')[['App']].count() Type_df['Rating']=df.groupby('Type')['Rating'].mean() Type_df.reset_index(inplace=True) Type_df ``` We found the number of apps that are freely available and their average rating and also number of paid apps and their average rating. 
``` fig, axes = plt.subplots(1, 2, figsize=(18, 6)) axes[0].bar(Type_df.Type,Type_df.App) axes[0].set_title("Number of free and paid apps") axes[0].set_ylabel('App count') axes[1].bar(Type_df.Type,Type_df.Rating) axes[1].set_title('Average Rating of free and paid apps') axes[1].set_ylabel('Average Rating'); ``` #### Conclusion Average rating of Paid Apps is more than Free apps.So,we can say that paid apps are trust worthy and we can invest in them ##### ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- ## Max Installs In this we are finding the apps with more number of installs and as we dont have exact count of installs we got around 20 apps with 1B+ downloads From the 20 apps we will see some analysis of what types are more installed ``` max_installs=df.loc[df['Installs']==df.Installs.max()][['App','Category','Reviews','Rating','Installs','Content Rating']] max_installs=max_installs.drop_duplicates(subset='App') max_installs ``` These are the 20 apps which are with 1B+ downloads ### Which App has more rating and trend of 20 apps rating ``` plt.figure(figsize=(12, 6)) sns.barplot('Rating','App',data=max_installs); ``` We can see that Google photos,Instagram and Subway Surfers are the most rated Apps which have 1B+ downloads. Though the Apps are used by 1B+ users they have a good rating too ### Which content Apps are most Installed We will group the most installed apps according to their content and see which content apps are most installed ``` content_max_df=pd.DataFrame(max_installs.groupby('Content Rating')['App'].count()) content_max_df.reset_index(inplace=True) content_max_df plt.figure(figsize=(12, 6)) axis=sns.barplot('Content Rating','App',data=content_max_df); axis.set(ylabel= "App count",title='Max Installed APP COUNT STATISTICS ACCORDING TO Content RATING'); ``` More than 10 apps are of type which can be used by any age group and about 8 apps are teen aged apps.Only 1 app is to used by person with age 10+ ### Which category Apps are more Installed In this we will group the most installed apps according to their category and see which category are on high demand ``` category_max_df=pd.DataFrame(max_installs.groupby('Category')['App'].count()) category_max_df.reset_index(inplace=True) category_max_df plt.figure(figsize=(12, 6)) axis=sns.barplot('App','Category',data=category_max_df); plt.plot(category_max_df.App,category_max_df.Category,'o--r') axis.set(ylabel= "App count",title='Max Installed APP COUNT STATISTICS ACCORDING TO Category'); ``` Communication Apps are mostly installed by people like facebook,whatsapp,instagram..and then social apps are in demand. #### Conclusion The most installed apps ie apps with downloads more than 1 Billion are mostly Communication related apps and can be used by any age group without any restriction and also have high user rating. ###### ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- ## Final Conclusion This analysis is mostly based on the existing data in the dataset , how one parameter is changing with respect to another parameter,whether paid apps are trust worthy and intrests of people towards some particular categories. 
This analysis can be extended further with machine learning algorithms and models to predict the number of installs and the rating a new app would receive when it is launched. ##### ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- ### THANK YOU :)
_Lambda School Data Science_ # Make explanatory visualizations Tody we will reproduce this [example by FiveThirtyEight:](https://fivethirtyeight.com/features/al-gores-new-movie-exposes-the-big-flaw-in-online-movie-ratings/) ``` from IPython.display import display, Image url = 'https://fivethirtyeight.com/wp-content/uploads/2017/09/mehtahickey-inconvenient-0830-1.png' example = Image(url=url, width=400) display(example) ``` Using this data: https://github.com/fivethirtyeight/data/tree/master/inconvenient-sequel Objectives - add emphasis and annotations to transform visualizations from exploratory to explanatory - remove clutter from visualizations Links - [Strong Titles Are The Biggest Bang for Your Buck](http://stephanieevergreen.com/strong-titles/) - [Remove to improve (the data-ink ratio)](https://www.darkhorseanalytics.com/blog/data-looks-better-naked) - [How to Generate FiveThirtyEight Graphs in Python](https://www.dataquest.io/blog/making-538-plots/) ## Make prototypes This helps us understand the problem ``` %matplotlib inline import matplotlib.pyplot as plt import numpy as np import pandas as pd plt.style.use('fivethirtyeight') fake = pd.Series([38, 3, 2, 1, 2, 4, 6, 5, 5, 33], index=range(1,11)) # index will start from 0 if not for this fake.plot.bar(color='C1', width=0.9); fake2 = pd.Series( [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10]) fake2.value_counts().sort_index().plot.bar(color='C1', width=0.9); ``` ## Annotate with text ``` display(example) plt.style.use('fivethirtyeight') fake = pd.Series([38, 3, 2, 1, 2, 4, 6, 5, 5, 33], index=range(1,11)) # index will start from 0 if not for this fake.plot.bar(color='C1', width=0.9); # rotate x axis numbers plt.style.use('fivethirtyeight') fake = pd.Series([38, 3, 2, 1, 2, 4, 6, 5, 5, 33], index=range(1,11)) # index will start from 0 if not for this ax = fake.plot.bar(color='C1', width=0.9) ax.tick_params(labelrotation=0) #to unrotate or remove the rotation ax.set(title="'An Incovenient Sequel: Truth to Power' is divisive"); #or '\'An Incovenient Sequel: Truth to Power\' is divisive' plt.style.use('fivethirtyeight') fake = pd.Series([38, 3, 2, 1, 2, 4, 6, 5, 5, 33], index=range(1,11)) # index will start from 0 if not for this ax = fake.plot.bar(color='C1', width=0.9) ax.tick_params(labelrotation=0) ax.text(x=-2,y=48,s="'An Incovenient Sequel: Truth to Power' is divisive", fontsize=16, fontweight='bold') ax.text(x=-2,y=45, s='IMDb ratings for the film as of Aug. 
29', fontsize=12) ax.set(xlabel='Rating', ylabel='Percent of total votes', yticks=range(0,50,10)); #(start pt., end pt., increment) ``` ## Reproduce with real data ``` df = pd.read_csv('https://raw.githubusercontent.com/fivethirtyeight/data/master/inconvenient-sequel/ratings.csv') df.shape df.head() width,height = df.shape width*height pd.options.display.max_columns = 500 df.head() df.sample(1).T df.timestamp.describe() # convert timestamp to date time df.timestamp = pd.to_datetime(df.timestamp) df.timestamp.describe() # Making datetime index of your df df = df.set_index('timestamp') df.head() df['2017-08-09'] # everything from this date df.category.value_counts() ``` ####only interested in IMDb users ``` df.category == 'IMDb users' # As a filter to select certain rows df[df.category == 'IMDb users'] lastday = df['2017-08-09'] lastday.head(1) lastday[lastday.category =='IMDb users'].tail() lastday[lastday.category =='IMDb users'].respondents.plot(); final = df.tail(1) #columns = ['1_pct','2_pct','3_pct','4_pct','5_pct','6_pct','7_pct','8_pct','9_pct','10_pct'] #OR columns = [str(i) + '_pct' for i in range(1,11)] final[columns] #OR #data.index.str.replace('_pct', '') data = final[columns].T data data.plot.bar() plt.style.use('fivethirtyeight') ax = data.plot.bar(color='C1', width=0.9) ax.tick_params(labelrotation=0) ax.text(x=-2,y=48,s="'An Incovenient Sequel: Truth to Power' is divisive", fontsize=16, fontweight='bold') ax.text(x=-2,y=44, s='IMDb ratings for the film as of Aug. 29', fontsize=12) ax.set(xlabel='Rating', ylabel='Percent of total votes', yticks=range(0,50,10)); #(start pt., end pt., increment) # to remove the timestamp texts in the center # to change the x axis texts plt.style.use('fivethirtyeight') ax = data.plot.bar(color='C1', width=0.9, legend=False) ax.tick_params(labelrotation=0) ax.text(x=-2,y=48,s="'An Incovenient Sequel: Truth to Power' is divisive", fontsize=16, fontweight='bold') ax.text(x=-2,y=44, s='IMDb ratings for the film as of Aug. 29', fontsize=12) ax.set(xlabel='Rating', ylabel='Percent of total votes', yticks=range(0,50,10)); data.index = range(1,11) data plt.style.use('fivethirtyeight') ax = data.plot.bar(color='C1', width=0.9, legend=False) ax.tick_params(labelrotation=0) ax.text(x=-2,y=48,s="'An Incovenient Sequel: Truth to Power' is divisive", fontsize=16, fontweight='bold') ax.text(x=-2,y=44, s='IMDb ratings for the film as of Aug. 29', fontsize=12) ax.set(xlabel='Rating', ylabel='Percent of total votes', yticks=range(0,50,10)) plt.xlabel('Rating', fontsize=14); ``` # ASSIGNMENT Replicate the lesson code. I recommend that you [do not copy-paste](https://docs.google.com/document/d/1ubOw9B3Hfip27hF2ZFnW3a3z9xAgrUDRReOEo-FHCVs/edit). # STRETCH OPTIONS #### Reproduce another example from [FiveThityEight's shared data repository](https://data.fivethirtyeight.com/). For example: - [thanksgiving-2015](https://fivethirtyeight.com/features/heres-what-your-part-of-america-eats-on-thanksgiving/) (try the [`altair`](https://altair-viz.github.io/gallery/index.html#maps) library) - [candy-power-ranking](https://fivethirtyeight.com/features/the-ultimate-halloween-candy-power-ranking/) (try the [`statsmodels`](https://www.statsmodels.org/stable/index.html) library) - or another example of your choice! #### Make more charts! Choose a chart you want to make, from [FT's Visual Vocabulary poster](http://ft.com/vocabulary). 
Find the chart in an example gallery of a Python data visualization library: - [Seaborn](http://seaborn.pydata.org/examples/index.html) - [Altair](https://altair-viz.github.io/gallery/index.html) - [Matplotlib](https://matplotlib.org/gallery.html) - [Pandas](https://pandas.pydata.org/pandas-docs/stable/visualization.html) Reproduce the chart. [Optionally, try the "Ben Franklin Method."](https://docs.google.com/document/d/1ubOw9B3Hfip27hF2ZFnW3a3z9xAgrUDRReOEo-FHCVs/edit) If you want, experiment and make changes. Take notes. Consider sharing your work with your cohort!
# Exam 2 - Gema Castillo García ``` %load_ext sql %config SqlMagic.autocommit=True %sql mysql+pymysql://root:[email protected]:3306/mysql ``` ## Problem 1: Controls Write a Python script that proves that the lines of data in Germplasm.tsv, and LocusGene are in the same sequence, based on the AGI Locus Code (ATxGxxxxxx). (hint: This will help you decide how to load the data into the database) ``` import pandas as pd import csv gp = pd.read_csv('Germplasm.tsv', sep='\t') matrix2 = gp[gp.columns[0]].to_numpy() germplasm = matrix2.tolist() #print(germplasm) ##to see the first column (AGI Locus Codes) of Germplasm.tsv lg = pd.read_csv('LocusGene.tsv', sep='\t') matrix2 = lg[lg.columns[0]].to_numpy() locus = matrix2.tolist() #print(locus) ##to see the first column (AGI Locus Codes) of LocusGene.tsv if (germplasm == locus): print("lines of data are in the same sequence") else: print("lines of data are not in the same sequence") ``` **I have only compared the first columns because is where AGI Codes are (they are the same in the two tables).** ## Problem 2: Design and create the database. * It should have two tables - one for each of the two data files * The two tables should be linked in a 1:1 relationship * you may use either sqlMagic or pymysql to build the database ``` ##creating a database called germplasm %sql create database germplasm; ##showing the existing databases %sql show databases; ##selecting the new database to interact with it %sql use germplasm; %sql show tables; ##the database is empty (it has not tables as expected) ##showing the structure of the tables I want to add to the germplasm database germplasm_file = open("Germplasm.tsv", "r") print(germplasm_file.read()) print() print() locus_file = open("LocusGene.tsv", "r") print(locus_file.read()) germplasm_file.close() ##closing the Germplasm.tsv file locus_file.close() ##closing the LocusGene.tsv file ##creating a table for Germplasm data %sql CREATE TABLE Germplasm_table(locus VARCHAR(10) NOT NULL PRIMARY KEY, germplasm VARCHAR(30) NOT NULL, phenotype VARCHAR(1000) NOT NULL, pubmed INTEGER NOT NULL); %sql DESCRIBE Germplasm_table; ##creating a table for Locus data %sql CREATE TABLE Locus_table(locus VARCHAR(10) NOT NULL PRIMARY KEY, gene VARCHAR(10) NOT NULL, protein_lenght INTEGER NOT NULL); %sql DESCRIBE Locus_table; ##showing the created tables %sql show tables; ##showing all of the data linking the two tables in a 1:1 relationship (it is empty because I have not introduced the data yet) %sql SELECT Germplasm_table.locus, Germplasm_table.germplasm, Germplasm_table.phenotype, Germplasm_table.pubmed, Locus_table.gene, Locus_table.protein_lenght\ FROM Germplasm_table, Locus_table\ WHERE Germplasm_table.locus = Locus_table.locus; ``` **- I have designed a database with two tables: Germplasm_table for Germplasm.tsv and Locus_table for LocusGene.tsv** **- The primary keys to link the two tables in a 1:1 relationship are in the 'locus' column of each table** ## Problem 3: Fill the database Using pymysql, create a Python script that reads the data from these files, and fills the database. There are a variety of strategies to accomplish this. I will give all strategies equal credit - do whichever one you are most confident with. 
``` import csv import re with open("Germplasm.tsv", "r") as Germplasm_file: next(Germplasm_file) ##skipping the first row for line in Germplasm_file: line = line.rstrip() ##removing blank spaces created by the \n (newline) character at the end of every line print(line, file=open('Germplasm_wo_header.tsv', 'a')) Germplasm_woh = open("Germplasm_wo_header.tsv", "r") import pymysql.cursors ##connecting to the database (db) germplasm connection = pymysql.connect(host='localhost', user='root', password='root', db='germplasm', charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) connection.autocommit(True) try: with connection.cursor() as cursor: sql = "INSERT INTO Germplasm_table (locus, germplasm, phenotype, pubmed) VALUES (%s, %s, %s, %s)" for line in Germplasm_woh.readlines(): field = line.split("\t") ##this splits the lines and inserts each field into a column fields = (field[0], field[1], field[2], field[3]) cursor.execute(sql, fields) connection.commit() finally: print("inserted") #connection.close() %sql SELECT * FROM Germplasm_table; import csv import re with open("LocusGene.tsv", "r") as LocusGene_file: next(LocusGene_file) ##skipping the first row for line in LocusGene_file: line = line.rstrip() ##removing blank spaces created by the \n (newline) character at the end of every line print(line, file=open('LocusGene_wo_header.tsv', 'a')) LocusGene_woh = open("LocusGene_wo_header.tsv", "r") import pymysql.cursors ##connecting to the database (db) germplasm connection = pymysql.connect(host='localhost', user='root', password='root', db='germplasm', charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) connection.autocommit(True) try: with connection.cursor() as cursor: sql = "INSERT INTO Locus_table (locus, gene, protein_lenght) VALUES (%s, %s, %s)" for line in LocusGene_woh.readlines(): field = line.split("\t") ##this splits the lines and inserts each field into a column fields = (field[0], field[1], field[2]) cursor.execute(sql, fields) connection.commit() finally: print("inserted") #connection.close() %sql SELECT * FROM Locus_table; ``` To do this exercise, I have asked Andrea Álvarez for some help because I did not understand well what you did in the suggested practice to fill databases. **As 'pubmed' and 'protein_length' columns are for INTEGERS, I have created new TSV files without the header (the first row gave me an error in those columns because of the header).** ## Problem 4: Create reports, written to a file 1. Create a report that shows the full, joined, content of the two database tables (including a header line) 2. Create a joined report that only includes the Genes SKOR and MAA3 3. Create a report that counts the number of entries for each Chromosome (AT1Gxxxxxx to AT5Gxxxxxxx) 4. Create a report that shows the average protein length for the genes on each Chromosome (AT1Gxxxxxx to AT5Gxxxxxxx) When creating reports 2 and 3, remember the "Don't Repeat Yourself" rule! All reports should be written to **the same file**. You may name the file anything you wish. ``` ##creating an empty text file in current directory report = open('exam2_report.txt', 'x') import pymysql.cursors ##connecting to the database (db) germplasm connection = pymysql.connect(host='localhost', user='root', password='root', db='germplasm', charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) connection.autocommit(True) print('Problem 4.1. 
Create a report that shows the full, joined, content of the two database tables (including a header line):', file=open('exam2_report.txt', 'a')) try: with connection.cursor() as cursor: sql = "SELECT 'locus' AS locus, 'germplasm' AS germplasm, 'phenotype' AS phenotype, 'pubmed' AS pubmed, 'gene' AS gene, 'protein_lenght' AS protein_lenght\ UNION ALL SELECT Germplasm_table.locus, Germplasm_table.germplasm, Germplasm_table.phenotype, Germplasm_table.pubmed, Locus_table.gene, Locus_table.protein_lenght\ FROM Germplasm_table, Locus_table\ WHERE Germplasm_table.locus = Locus_table.locus" cursor.execute(sql) results = cursor.fetchall() for result in results: print(result['locus'],result['germplasm'], result['phenotype'], result['pubmed'], result['gene'], result['protein_lenght'], file=open('exam2_report.txt', 'a')) finally: print("Problem 4.1 report written in exam2_report.txt file") ``` **I have omitted the locus column from the Locus_table in 4.1 and 4.2 for not repeating information.** ``` print('\n\nProblem 4.2. Create a joined report that only includes the Genes SKOR and MAA3:', file=open('exam2_report.txt', 'a')) try: with connection.cursor() as cursor: sql = "SELECT Germplasm_table.locus, Germplasm_table.germplasm, Germplasm_table.phenotype, Germplasm_table.pubmed, Locus_table.gene, Locus_table.protein_lenght\ FROM Germplasm_table, Locus_table\ WHERE Germplasm_table.locus = Locus_table.locus AND (Locus_table.gene = 'SKOR' OR Locus_table.gene = 'MAA3')" cursor.execute(sql) results = cursor.fetchall() for result in results: print(result['locus'],result['germplasm'], result['phenotype'], result['pubmed'], result['gene'], result['protein_lenght'], file=open('exam2_report.txt', 'a')) finally: print("Problem 4.2 report written in exam2_report.txt file") print('\n\nProblem 4.3. Create a report that counts the number of entries for each Chromosome:', file=open('exam2_report.txt', 'a')) try: with connection.cursor() as cursor: i = 1 ##marks the beginning of the loop (i.e., chromosome 1) while i < 6: sql = "SELECT COUNT(*) AS 'Entries for each Chromosome' FROM Germplasm_table WHERE locus REGEXP 'AT"+str(i)+"G'" cursor.execute(sql) results = cursor.fetchall() for result in results: print("- Chromosome", i, "has", result['Entries for each Chromosome'], "entries.", file=open('exam2_report.txt', 'a')) i = i +1 finally: print("Problem 4.3 report written in exam2_report.txt file") print('\n\nProblem 4.4. Create a report that shows the average protein length for the genes on each Chromosome:', file=open('exam2_report.txt', 'a')) try: with connection.cursor() as cursor: i = 1 ##marks the beginning of the loop (i.e., chromosome 1) while i < 6: sql = "SELECT AVG(protein_lenght) AS 'Average protein length for each Chromosome' FROM Locus_table WHERE locus REGEXP 'AT"+str(i)+"G'" cursor.execute(sql) results = cursor.fetchall() for result in results: print("- Average protein length for chromosome", i, "genes is", result['Average protein length for each Chromosome'], file=open('exam2_report.txt', 'a')) i = i +1 finally: print("Problem 4.4 report written in exam2_report.txt file") ##closing the report file with 'Problem 4' answers report.close() ```
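As a side note on the header workaround used in Problem 3, here is a hedged sketch of an alternative that skips the header row in memory instead of writing the intermediate `*_wo_header.tsv` files. It assumes the same connection settings and `Germplasm_table` schema as above; MySQL coerces the numeric `pubmed` strings into the INTEGER column on insert.

```
import csv
import pymysql.cursors

connection = pymysql.connect(host='localhost', user='root', password='root',
                             db='germplasm', charset='utf8mb4',
                             cursorclass=pymysql.cursors.DictCursor)
connection.autocommit(True)

with open("Germplasm.tsv", "r") as tsv_file:
    reader = csv.reader(tsv_file, delimiter="\t")
    next(reader)  # skip the header row instead of writing a new file
    rows = [tuple(row) for row in reader]

sql = "INSERT INTO Germplasm_table (locus, germplasm, phenotype, pubmed) VALUES (%s, %s, %s, %s)"
with connection.cursor() as cursor:
    cursor.executemany(sql, rows)
connection.commit()
```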
# Quick Multi-Processing Tests ``` import numpy as np import matplotlib.pyplot as plt from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor import time import numba import pandas as pd import pyspark from pyspark.sql import SparkSession ``` Defining a an arbitrary function for testing. The function doesn't mean anything. ``` def fun(x): return x * np.sin(10*x) + np.tan(34*x) + np.log(x) #Calcluate a value for testing fun(10) #Plot the function, for testing x = np.arange(0.1,10,0.5) plt.plot(x,fun(x)); ``` ### Benchmark Without any parallelism, for comparison purposes ``` %%timeit n = int(1e7) ## Using a large number to iterate def f(n): x = np.random.random(n) y = (x * np.sin(10*x) + np.tan(34*x) + np.log(x)) return y f(n) ``` 652 ms without parallel processing ### ProcessPool Execution [ProcessPoolExecutor](https://docs.python.org/3/library/concurrent.futures.html) uses executes the processes asynchronously by using the number of processors assigned, in parallel. ``` %%time with ProcessPoolExecutor(max_workers=4) as executor: result = executor.map(f, [int(1e7) for i in range(10)]) ``` Execution time dropped from 652 ms to 312 ms! This can be further optimized by specifying the number of processors to use and the chunk size. I will skip that for now. ### ThreadPool Execution Similar to `ProcessPool` but uses threads instead of CPU. ``` %%time with ThreadPoolExecutor(max_workers=4) as texecute: result_t = texecute.map(f, [int(1e7) for i in range(10)]) ``` Far worse than the benchmark and the `ProcessPool`. I am not entirely sure why, but most lilelt because the interpreter is allowing only 1 thread to run or is creating an I/O bottleneck. ### Using NUMBA I have used `numba` for JIT compilation for some of my programs for bootstrapping. ``` %%time @numba.jit(nopython=True, parallel=True) def f2(n): x = np.random.random(n) y = (x * np.sin(10*x) + np.tan(34*x) + np.log(x)) return y f2(int(1e7)) ``` 400 ms - so better than the bechmark but almost as good as the `ProcessPool` method ### Using Spark ``` spark=( SparkSession.builder.master("local") .appName("processingtest") .getOrCreate() ) from pyspark.sql.types import FloatType from pyspark.sql.functions import udf n = int(1e7) df = pd.DataFrame({"x":np.random.random(n)}) df.head(3) def f3(x): return (x * np.sin(10*x) + np.tan(34*x) + np.log(x)) func_udf = udf(lambda x: f3(x), FloatType()) df_spark = spark.createDataFrame(df) df_spark.withColumn("udf",func_udf("x")) ``` Inspecting the Spark job shows execution time as 0.4s (400 ms), as good as numba and `ProcessPool`. Spark would be much more scalable. Th eonly challenge here is the data needs to be converted to a tabular/dataframe format first. For most business process modeling scenarios that's usually not required and is an added step. ``` spark.stop() ``` ### References : 1. https://medium.com/@nyomanpradipta120/why-you-should-use-threadpoolexecutor-instead-processpoolexecutor-based-on-ctf-challenge-f51e838df351 2. https://docs.python.org/3/library/concurrent.futures.html
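The ProcessPool section above notes that the run "can be further optimized by specifying the number of processors to use and the chunk size" but skips it. Here is a minimal sketch of what that tuning could look like; the worker count and chunk size are arbitrary examples, not measured optima.

```
from concurrent.futures import ProcessPoolExecutor
import numpy as np

def f(n):
    x = np.random.random(n)
    return x * np.sin(10 * x) + np.tan(34 * x) + np.log(x)

if __name__ == "__main__":
    # chunksize batches work items per inter-process message, which helps
    # when the list of tasks is long relative to the cost of each task.
    with ProcessPoolExecutor(max_workers=4) as executor:
        results = list(executor.map(f, [int(1e5)] * 1000, chunksize=50))
    print(len(results))
```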
``` # Import the libraries import numpy as np import pandas as pd import os import seaborn as sns import matplotlib.pyplot as plt import warnings import numpy as np import itertools import statsmodels.api as sm import matplotlib from textwrap import wrap from matplotlib import ticker from datetime import datetime warnings.filterwarnings("ignore") plt.style.use('fivethirtyeight') %matplotlib inline ``` ## Importing the data ``` #print(os.listdir('../data')) df = pd.read_csv('../data/NumberConfirmedOfCases.csv') #df = df.set_index('Date', append=False) #df['Date'] = df.apply(lambda x: datetime.strptime(x['Date'], '%d-%m-%Y').date(), axis=1) #convert the date df = df.groupby('Date')['Cases'].sum().reset_index() #group the data df['Date'] = pd.to_datetime(df['Date']) df['date_delta'] = (df['Date'] - df['Date'].min()) / np.timedelta64(1,'D') df.head() #print(os.listdir('../data')) df = pd.read_csv('../data/NumberConfirmedOfCases.csv') #df = df.set_index('Date', append=False) #df['Date'] = df.apply(lambda x: datetime.strptime(x['Date'], '%d-%m-%Y').date(), axis=1) #convert the date df = df.groupby('Date')['Cases'].sum().reset_index() #group the data df.head() df.describe() df.describe(include='O') df.columns df.shape #print('Time period start: {}\nTime period end: {}'.format(df.year.min(),df.year.max())) ``` ## Visualizing the time series data We are going to use matplotlib to visualise the dataset. ``` # Time series data source: fpp pacakge in R. import matplotlib.pyplot as plt df = pd.read_csv('../data/NumberConfirmedOfCases.csv', parse_dates=['Date'], index_col='Date') df = df.groupby('Date')['Cases'].sum().reset_index() #group the data # Draw Plot def plot_df(df, x, y, title="", xlabel='Date', ylabel='Cases', dpi=100,angle=45): plt.figure(figsize=(8,4), dpi=dpi) plt.plot(x, y, color='tab:red') plt.gca().set(title=title, xlabel=xlabel, ylabel=ylabel) plt.xticks(rotation=angle) plt.show() plot_df(df, x=df.Date, y=df.Cases, title='Dayly infiction') df['Date'] = pd.to_datetime(df['Date'], format='%Y-%m-%d') df['Date'].head() df = df.set_index('Date') df.index from pandas import Series from matplotlib import pyplot pyplot.figure(figsize=(6,8), dpi= 100) pyplot.subplot(211) df.Cases.hist() pyplot.subplot(212) df.Cases.plot(kind='kde') pyplot.show() from pylab import rcParams df = pd.read_csv('../data/NumberConfirmedOfCases.csv', parse_dates=['Date'], index_col='Date') df = df.groupby('Date')['Cases'].sum().reset_index() #group the data df['Date'] = pd.to_datetime(df['Date'], format='%Y-%m-%d') df = df.set_index('Date') rcParams['figure.figsize'] = 8,6 decomposition = sm.tsa.seasonal_decompose(df, model='multiplicative', freq=1) fig = decomposition.plot() plt.show() df = pd.read_csv('../data/NumberConfirmedOfCases.csv', parse_dates=['Date'], index_col='Date') df = df.groupby('Date')['Cases'].sum().reset_index() #group the data x = df['Date'].values y1 = df['Cases'].values # Plot fig, ax = plt.subplots(1, 1, figsize=(6,3), dpi= 120) plt.fill_between(x, y1=y1, y2=-y1, alpha=0.5, linewidth=2, color='seagreen') plt.ylim(-50, 50) plt.title('Dayly Infiction (Two Side View)', fontsize=16) plt.hlines(y=0, xmin=np.min(df.Date), xmax=np.max(df.Date), linewidth=.5) plt.xticks(rotation=45) plt.show() ``` ## Boxplot of Month-wise (Seasonal) and Year-wise (trend) Distribution You can group the data at seasonal intervals and see how the values are distributed within a given year or month and how it compares over time. The boxplots make the year-wise and month-wise distributions evident. 
Also, in a month-wise boxplot, the months of December and January clearly has higher drug sales, which can be attributed to the holiday discounts season. So far, we have seen the similarities to identify the pattern. Now, how to find out any deviations from the usual pattern? ``` # Importing the data df = pd.read_csv('../data/NumberConfirmedOfCases.csv', parse_dates=['Date'], index_col='Date') df = df.groupby('Date')['Cases'].sum().reset_index() #group the data df.reset_index(inplace=True) # Prepare data #df['year'] = [d.year for d in df.Date] df['month'] = [d.strftime('%b') for d in df.Date] df['day']=df['Date'].dt.day df['week']=df['Date'].dt.week months = df['month'].unique() # Plotting fig, axes = plt.subplots(3,1, figsize=(8,16), dpi= 80) sns.boxplot(x='month', y='Cases', data=df, ax=axes[0]) sns.boxplot(x='week', y='Cases', data=df,ax=axes[1]) sns.boxplot(x='day', y='Cases', data=df,ax=axes[2]) axes[0].set_title('Month-wise Box Plot', fontsize=18); axes[1].set_title('Week-wise Box Plot', fontsize=18) axes[1].set_title('Day-wise Box Plot', fontsize=18) plt.show() ``` ## Autocorrelation and partial autocorrelation Autocorrelation measures the relationship between a variable's current value and its past values. Autocorrelation is simply the correlation of a series with its own lags. If a series is significantly autocorrelated, that means, the previous values of the series (lags) may be helpful in predicting the current value. Partial Autocorrelation also conveys similar information but it conveys the pure correlation of a series and its lag, excluding the correlation contributions from the intermediate lags. ``` from statsmodels.graphics.tsaplots import plot_acf from statsmodels.graphics.tsaplots import plot_pacf df = pd.read_csv('../data/NumberConfirmedOfCases.csv') df = df.groupby('Date')['Cases'].sum().reset_index() #group the data pyplot.figure(figsize=(6,8), dpi= 100) pyplot.subplot(211) plot_acf(df.Cases, ax=pyplot.gca(), lags = len(df.Cases)-1) pyplot.subplot(212) plot_pacf(df.Cases, ax=pyplot.gca(), lags = len(df.Cases)-1) pyplot.show() ``` ## Lag Plots A Lag plot is a scatter plot of a time series against a lag of itself. It is normally used to check for autocorrelation. If there is any pattern existing in the series like the one you see below, the series is autocorrelated. If there is no such pattern, the series is likely to be random white noise. ``` from pandas.plotting import lag_plot plt.rcParams.update({'ytick.left' : False, 'axes.titlepad':10}) # Import df = pd.read_csv('../data/NumberConfirmedOfCases.csv') df = df.groupby('Date')['Cases'].sum().reset_index() #group the data # Plot fig, axes = plt.subplots(1, 4, figsize=(10,3), sharex=True, sharey=True, dpi=100) for i, ax in enumerate(axes.flatten()[:4]): lag_plot(df.Cases, lag=i+1, ax=ax, c='firebrick') ax.set_title('Lag ' + str(i+1)) fig.suptitle('Lag Plots of Sun Spots Area)', y=1.15) ``` ## Estimating the forecastability The more regular and repeatable patterns a time series has, the easier it is to forecast. Since we have a small dataset, we apply a Sample Entropy to examine that. Put in mind that, The higher the approximate entropy, the more difficult it is to forecast it. 
```
# https://en.wikipedia.org/wiki/Sample_entropy
df = pd.read_csv('../data/NumberConfirmedOfCases.csv')
df = df.groupby('Date')['Cases'].sum().reset_index() #group the data

def SampEn(U, m, r):
    """Compute sample entropy"""
    def _maxdist(x_i, x_j):
        return max([abs(ua - va) for ua, va in zip(x_i, x_j)])

    def _phi(m):
        x = [[U[j] for j in range(i, i + m - 1 + 1)] for i in range(N - m + 1)]
        C = [len([1 for j in range(len(x)) if i != j and _maxdist(x[i], x[j]) <= r]) for i in range(len(x))]
        return sum(C)

    N = len(U)
    return -np.log(_phi(m+1) / _phi(m))

print(SampEn(df.Cases, m=2, r=0.2*np.std(df.Cases)))
```

### Plotting Rolling Statistics

We observe that the rolling mean and standard deviation are not constant with respect to time (there is an increasing trend), so the time series is not stationary.

```
from statsmodels.tsa.stattools import adfuller

def test_stationarity(timeseries):

    #Determining rolling statistics
    rolmean = pd.Series(timeseries).rolling(window=12).mean()
    rolstd = pd.Series(timeseries).rolling(window=12).std()

    #Plot rolling statistics:
    orig = plt.plot(timeseries, color='blue', label='Original')
    mean = plt.plot(rolmean, color='red', label='Rolling Mean')
    std = plt.plot(rolstd, color='black', label='Rolling Std')
    plt.legend(loc='best')
    plt.title('Rolling Mean & Standard Deviation')
    plt.show(block=False)

    #Perform Dickey-Fuller test:
    print('Results of Dickey-Fuller Test:')
    dftest = adfuller(timeseries, autolag='AIC')
    dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])
    for key, value in dftest[4].items():
        dfoutput['Critical Value (%s)'%key] = value
    print(dfoutput)

df = pd.read_csv('../data/NumberConfirmedOfCases.csv')
df = df.groupby('Date')['Cases'].sum().reset_index() #group the data
test_stationarity(df['Cases'])
```

The standard deviation and the mean are clearly increasing with time; therefore, this is not a stationary series.

```
from pylab import rcParams

df = pd.read_csv('../data/NumberConfirmedOfCases.csv', parse_dates=['Date'], index_col='Date')
df = df.groupby('Date')['Cases'].sum().reset_index() #group the data
df['Date'] = pd.to_datetime(df['Date'], format='%Y-%m-%d')
df = df.set_index('Date')

ts_log = np.log(df)
plt.plot(ts_log)
```

# Remove Trend - Smoothing

```
n = int(len(df.Cases)/2)
moving_avg = ts_log.rolling(n).mean()
plt.plot(ts_log)
plt.plot(moving_avg, color='red')

ts_log_moving_avg_diff = ts_log.Cases - moving_avg.Cases
ts_log_moving_avg_diff.head(n)

ts_log_moving_avg_diff.dropna(inplace=True)
test_stationarity(ts_log_moving_avg_diff)

expwighted_avg = ts_log.ewm(n).mean()
plt.plot(ts_log)
plt.plot(expwighted_avg, color='red')

ts_log_ewma_diff = ts_log.Cases - expwighted_avg.Cases
test_stationarity(ts_log_ewma_diff)

ts_log_diff = ts_log.Cases - ts_log.Cases.shift()
plt.plot(ts_log_diff)

ts_log_diff.dropna(inplace=True)
test_stationarity(ts_log_diff)
```

## Autoregressive Integrated Moving Average (ARIMA)

In an ARIMA model there are 3 parameters that are used to help model the major aspects of a time series: seasonality, trend, and noise. These parameters are labeled p, d, and q.

Number of AR (Auto-Regressive) terms (p): p is the parameter associated with the auto-regressive aspect of the model, which incorporates past values, i.e. lags of the dependent variable. For instance, if p is 5, the predictors for x(t) will be x(t-1), ..., x(t-5).

Number of Differences (d): d is the parameter associated with the integrated part of the model, which controls the amount of differencing applied to the time series.
Number of MA (Moving Average) terms (q): q is the size of the moving-average window of the model, i.e. the number of lagged forecast errors in the prediction equation. For instance, if q is 5, the predictors for x(t) will be e(t-1), ..., e(t-5), where e(i) is the difference between the moving average at the i-th instant and the actual value.

```
# ARMA example
from statsmodels.tsa.arima_model import ARMA
from random import random

# fit model
model = ARMA(ts_log_diff, order=(2, 1))
model_fit = model.fit(disp=False)
model_fit.summary()

plt.plot(ts_log_diff)
plt.plot(model_fit.fittedvalues, color='red')
plt.title('RSS: %.4f'% np.nansum((model_fit.fittedvalues-ts_log_diff)**2))

ts = df.Cases - df.Cases.shift()
ts.dropna(inplace=True)
pyplot.figure()
pyplot.subplot(211)
plot_acf(ts, ax=pyplot.gca(), lags=n)
pyplot.subplot(212)
plot_pacf(ts, ax=pyplot.gca(), lags=n)
pyplot.show()

#divide into train and validation set
train = df[:int(0.8*(len(df)))]
valid = df[int(0.8*(len(df))):]

#plotting the data
train['Cases'].plot()
valid['Cases'].plot()

#building the model
from pmdarima.arima import auto_arima
model = auto_arima(train, trace=True, error_action='ignore', suppress_warnings=True)
model.fit(train)

forecast = model.predict(n_periods=len(valid))
forecast = pd.DataFrame(forecast, index=valid.index, columns=['Prediction'])

#plot the predictions for validation set
plt.plot(df.Cases, label='Train')
#plt.plot(valid, label='Valid')
plt.plot(forecast, label='Prediction')
plt.show()

from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error, median_absolute_error, mean_squared_log_error

# MAPE is not among the metrics imported above, so define it here
def mean_absolute_percentage_error(y_true, y_pred):
    y_true, y_pred = np.array(y_true), np.array(y_pred)
    return np.mean(np.abs((y_true - y_pred) / y_true)) * 100

def evaluate_forecast(y, pred):
    results = pd.DataFrame({'r2_score': r2_score(y, pred)}, index=[0])
    results['mean_absolute_error'] = mean_absolute_error(y, pred)
    results['median_absolute_error'] = median_absolute_error(y, pred)
    results['mse'] = mean_squared_error(y, pred)
    results['msle'] = mean_squared_log_error(y, pred)
    results['mape'] = mean_absolute_percentage_error(y, pred)
    results['rmse'] = np.sqrt(results['mse'])
    return results

evaluate_forecast(valid, forecast)

train.head()

train_prophet = pd.DataFrame()
train_prophet['ds'] = train.index
train_prophet['y'] = train.Cases.values
train_prophet.head()

from fbprophet import Prophet

#instantiate Prophet with yearly seasonality and a multiplicative seasonal mode
model = Prophet(yearly_seasonality=True, seasonality_mode='multiplicative')
model.fit(train_prophet) #fit the model with your dataframe

# predict 36 periods ahead at month-start ('MS') frequency
future = model.make_future_dataframe(periods=36, freq='MS')
future.tail()

forecast.columns

# now let's make the forecasts
forecast = model.predict(future)
forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail()

fig = model.plot(forecast)
#plot the predictions for validation set
plt.plot(valid, label='Valid', color='red', linewidth=2)
plt.show()

model.plot_components(forecast);

y_prophet = pd.DataFrame()
y_prophet['ds'] = df.index
y_prophet['y'] = df.Cases.values
y_prophet = y_prophet.set_index('ds')

forecast_prophet = forecast.set_index('ds')

start_index = 5
end_index = 15
evaluate_forecast(y_prophet.y[start_index:end_index], forecast_prophet.yhat[start_index:end_index])

from statsmodels.tsa.arima_model import ARIMA

model = ARIMA(df['Cases'], order=(2, 1, 2))
results_ARIMA = model.fit(disp=-1)
#plt.plot(ts_log_diff)
plt.plot(results_ARIMA.fittedvalues, color='red')
#plt.title('RSS: %.4f'% sum((results_ARIMA.fittedvalues-ts_log_diff)**2))
```
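As a short follow-up (not part of the original analysis), the sketch below produces out-of-sample forecasts from the ARIMA(2, 1, 2) results fitted in the last cell, using the same old `statsmodels.tsa.arima_model` API. The two-week horizon is an arbitrary choice for illustration.

```
# Minimal sketch, not from the original notebook: out-of-sample ARIMA forecasting.
n_ahead = 14  # arbitrary illustration horizon (days)

# forecast() returns point forecasts, standard errors and confidence intervals
fc, stderr, conf_int = results_ARIMA.forecast(steps=n_ahead)

# build a daily index that continues where the observed series ends
future_index = pd.date_range(df.index[-1], periods=n_ahead + 1, freq='D')[1:]

plt.plot(df['Cases'], label='observed')
plt.plot(pd.Series(fc, index=future_index), label='ARIMA forecast')
plt.fill_between(future_index, conf_int[:, 0], conf_int[:, 1], alpha=0.2)
plt.legend(loc='best')
plt.show()
```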
# Bayesian Randomized Benchmarking Demo This is a bayesian pyMC3 implementation on top of frequentist interleaved RB from qiskit experiments Based on this [WIP tutorial](https://github.com/Qiskit/qiskit-experiments/blob/main/docs/tutorials/rb_example.ipynb) on july 10 2021 ``` import numpy as np import copy import qiskit_experiments as qe import qiskit.circuit.library as circuits rb = qe.randomized_benchmarking # for retrieving gate calibration from datetime import datetime import qiskit.providers.aer.noise.device as dv # import the bayesian packages import pymc3 as pm import arviz as az import bayesian_fitter as bf simulation = True # make your choice here if simulation: from qiskit.providers.aer import AerSimulator from qiskit.test.mock import FakeParis backend = AerSimulator.from_backend(FakeParis()) else: from qiskit import IBMQ IBMQ.load_account() provider = IBMQ.get_provider(hub='ibm-q') backend = provider.get_backend('ibmq_lima') # type here hardware backend import importlib importlib.reload(bf) ``` # Running 1-qubit RB ``` lengths = np.arange(1, 1000, 100) num_samples = 10 seed = 1010 qubits = [0] # Run an RB experiment on qubit 0 exp1 = rb.StandardRB(qubits, lengths, num_samples=num_samples, seed=seed) expdata1 = exp1.run(backend) # View result data print(expdata1) physical_qubits = [0] nQ = len(qubits) scale = (2 ** nQ - 1) / 2 ** nQ interleaved_gate ='' # retrieve from the frequentist model (fm) analysis # some values,including priors, for the bayesian analysis perr_fm, popt_fm, epc_est_fm, epc_est_fm_err, experiment_type = bf.retrieve_from_lsf(expdata1) EPG_dic = expdata1._analysis_results[0]['EPG'][qubits[0]] # get count data Y = bf.get_GSP_counts(expdata1._data, len(lengths),range(num_samples)) shots = bf.guess_shots(Y) ``` ### Pooled model ``` #build model pooled_model = bf.get_bayesian_model(model_type="pooled",Y=Y,shots=shots,m_gates=lengths, mu_AB=[popt_fm[0],popt_fm[2]],cov_AB=[perr_fm[0],perr_fm[2]], alpha_ref=popt_fm[1], alpha_lower=popt_fm[1]-6*perr_fm[1], alpha_upper=min(1.-1.E-6,popt_fm[1]+6*perr_fm[1])) pm.model_to_graphviz(pooled_model) trace_p = bf.get_trace(pooled_model, target_accept = 0.95) # backend's recorded EPG print(rb.RBUtils.get_error_dict_from_backend(backend, qubits)) bf.RB_bayesian_results(pooled_model, trace_p, lengths, epc_est_fm, epc_est_fm_err, experiment_type, scale, num_samples, Y, shots, physical_qubits, interleaved_gate, backend, EPG_dic = EPG_dic) ``` ### Hierarchical model ``` #build model original_model = bf.get_bayesian_model(model_type="h_sigma",Y=Y,shots=shots,m_gates=lengths, mu_AB=[popt_fm[0],popt_fm[2]],cov_AB=[perr_fm[0],perr_fm[2]], alpha_ref=popt_fm[1], alpha_lower=popt_fm[1]-6*perr_fm[1], alpha_upper=min(1.-1.E-6,popt_fm[1]+6*perr_fm[1]), sigma_theta=0.001,sigma_theta_l=0.0005,sigma_theta_u=0.0015) pm.model_to_graphviz(original_model) trace_o = bf.get_trace(original_model, target_accept = 0.95) # backend's recorded EPG print(rb.RBUtils.get_error_dict_from_backend(backend, qubits)) bf.RB_bayesian_results(original_model, trace_o, lengths, epc_est_fm, epc_est_fm_err, experiment_type, scale, num_samples, Y, shots, physical_qubits, interleaved_gate, backend, EPG_dic = EPG_dic) ```
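The model construction above is hidden inside `bayesian_fitter.get_bayesian_model`. As a rough, self-contained illustration of what a pooled RB model of this kind typically looks like (the standard decay `A * alpha**m + B` with a binomial likelihood on ground-state counts), here is a minimal PyMC3 sketch on synthetic stand-in data; the actual helper's priors and parametrization may well differ.

```
# Minimal sketch on synthetic data (not the bayesian_fitter implementation):
# pooled randomized-benchmarking decay A * alpha**m + B with a binomial likelihood.
import numpy as np
import pymc3 as pm

lengths_toy = np.arange(1, 1000, 100)       # Clifford sequence lengths m
shots_toy = 1024                            # shots per circuit
rng = np.random.default_rng(1010)
p_true = 0.5 * 0.999 ** lengths_toy + 0.5   # "true" ground-state survival probability
Y_toy = rng.binomial(shots_toy, p_true)     # synthetic ground-state-population counts

with pm.Model() as toy_pooled_model:
    A = pm.Uniform("A", 0.0, 1.0)
    B = pm.Uniform("B", 0.0, 1.0)
    alpha = pm.Uniform("alpha", 0.95, 1.0)
    # survival probability of the decay model, clipped to a valid probability range
    p = pm.math.clip(A * alpha ** lengths_toy + B, 1e-6, 1.0 - 1e-6)
    pm.Binomial("counts", n=shots_toy, p=p, observed=Y_toy)
    toy_trace = pm.sample(1000, tune=1000, target_accept=0.95,
                          return_inferencedata=False)

# error per Clifford for one qubit: EPC = scale * (1 - alpha), scale = (2**1 - 1) / 2**1
print("EPC estimate ~", 0.5 * (1.0 - toy_trace["alpha"].mean()))
```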
<a href="https://colab.research.google.com/github/DSNortsev/CSE-694-Case-Studies-in-Deep-Learning/blob/master/HW2/HW2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` import seaborn as sns import pandas as pd import matplotlib.pyplot as plt from pandas.plotting import parallel_coordinates data = sns.load_dataset("iris") ``` ## **Task 1. Brief description on its value and possible applications.** The IRIS dataset has the following characteristics: * 150 examples of Iris flowers * The first four fields are features that are the characteristics of flower examples. All these fields hold float numbers representing flower measurements. * The last column is the label which represents the Iris species. * Balance class distribution meaning that each category has even amount of instances * Has no missing values One example of possible application is for botanists to find an automated way to categorize each Iris flower they find. For instance, to classify based on photographs, or in our case based on the length and width measurements of their sepals and petals. ``` data print(f'CLASS DISTRIBUTION:\n{data.groupby("species").size()}') print(f'\nSHAPE: {data.shape}') print(f'\nTOTAL MISSING VALUES:\n{data.isnull().sum()}\n') ``` _________ ## **Task 2. Summarize and visually report on the Size of this data set including labeling or non-labeled status** For all three species, the respective values of the mean and median of its features are found to be pretty close. This indicates that data is nearly symmetrically distributed with very less presence of outliers. ``` data.groupby('species').agg(['mean', 'median']) ``` Standard deviation (or variance) is an indication of how widely the data is spread about the mean. ``` data.groupby('species').std() ``` The isolated points for each feature that can be seen in the box-plots below are the outliers in the data. Since these are very few in number, it wouldn't have any significant impact on our analysis. ``` sns.set(style="ticks") plt.figure(figsize=(12,10)) plt.subplot(2,2,1) sns.boxplot(x='species',y='sepal_length',data=data) plt.subplot(2,2,2) sns.boxplot(x='species',y='sepal_width',data=data) plt.subplot(2,2,3) sns.boxplot(x='species',y='petal_length',data=data) plt.subplot(2,2,4) sns.boxplot(x='species',y='petal_width',data=data) plt.show() ``` Scatter plot helps to analyze the relationship between 2 features on the x and y ``` sns.pairplot(data) ``` Next, we can make a correlation matrix to see how these features are correlated to each other using a heatmap in the seaborn library. It can be observed that petal measurements are highly correlated, while the sepal one are uncorrelated. Also we can see that petal length is highly correlated with speal length, but not with sepal width. ``` plt.figure(figsize=(10,11)) sns.heatmap(data.corr(),annot=True, square = True) plt.plot() ``` Another way to visualize the data is by parallel coordinate plot, which represents each row as a line. As we have seen below, petal measurements can separate species better than the sepal ones. ``` parallel_coordinates(data, "species", color = ['blue', 'red', 'green']); ``` Now, we can plot a scatter plot between the sepal length and the sepal width to visualise the iris dataset. We can observe that the blue dots(setosa) are quite clear separated from red(versicolor) and green dots(virginica), while separation between red dots and green dots might be a very difficult task given the two features available. 
``` labels_names = { 'setosa': 'blue', 'versicolor': 'red', 'virginica': 'green'} for species, color in labels_names.items(): x = data.loc[data['species'] == species]['sepal_length'] y = data.loc[data['species'] == species]['sepal_width'] plt.scatter(x, y, c=color) plt.legend(labels_names.keys()) plt.xlabel('sepal_length') plt.ylabel('sepal_width') plt.show() ``` We can also visualise the data on different features such as petal width and petal length. In this case, the decision boundary between blue, green and red dots can be easily determined, which indicates that using all features for training is a good choice. ``` labels_names = { 'setosa': 'blue', 'versicolor': 'red', 'virginica': 'green'} for species, color in labels_names.items(): x = data.loc[data['species'] == species]['petal_length'] y = data.loc[data['species'] == species]['petal_width'] plt.scatter(x, y, c=color) plt.legend(labels_names.keys()) plt.xlabel('petal_length') plt.ylabel('petal_width') plt.show() ``` ___________ ## **3. Propose and perform Deep Learning using this data set.** Report on your implementation as follows: * Justify your selection of techniques and platform * Explain your results and their applicability &nbsp;&nbsp;&nbsp;&nbsp;In our project we are using python language. There are two well-known libraries for deep learning such as PyTorch and Tensorflow. Each library has its own API implementation for example, Keras is high level API for Tensorflow, while fastai is an API for PyTorch.<br> &nbsp;&nbsp;&nbsp;&nbsp;The Iris classification problem is an example of supervised machine learning: the model is trained from examples that contain labels and for our model we are planning to use Keras wrapper for Tensor flow.<br> &nbsp;&nbsp;&nbsp;&nbsp;The Deep Learning would be performed in the following steps: * Data preprocessing * Model Building * Model Selection<br> &nbsp;&nbsp;&nbsp;&nbsp;In the **Data preprocessing**, we need to create data frames for features and labels, normalize the feature data by converting all values in a range between 0 and 1, convert species labels to numerical representation and then to binary string. Then, the data needs to be split into train and test data sets. 
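Before the preprocessing phase below, it is worth being precise about "converting all values in a range between 0 and 1". The small, purely illustrative snippet here uses `MinMaxScaler`, the usual way to get an exact 0-to-1 range; note that the preprocessing cell later in this notebook uses `sklearn.preprocessing.normalize`, which performs L2 row normalization instead.

```
# Hypothetical illustration only: min-max scaling of the iris features into [0, 1].
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler

iris = sns.load_dataset("iris")
X_demo = iris.drop(columns=["species"])

scaler = MinMaxScaler()                  # in practice, fit on the training split only
X_scaled = scaler.fit_transform(X_demo)

print(X_scaled.min(axis=0))              # every column now starts at 0.0
print(X_scaled.max(axis=0))              # ... and ends at 1.0
```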
# **Phase 1: Data Preprocessing**

### Step 1: Create DataFrames for features and labels

```
import pandas as pd
from sklearn.preprocessing import LabelBinarizer, LabelEncoder

encoder = LabelBinarizer()
le = LabelEncoder()
seed = 42

data = sns.load_dataset("iris")

# Create X variable with four features
X = data.drop(['species'], axis=1)

# Convert species to int
Y_int = le.fit_transform(data['species'])
# Convert species int to binary (one-hot) representation
Y_binary = encoder.fit_transform(Y_int)

target_names = data['species'].unique()
Y = pd.DataFrame(data=Y_binary, columns=target_names)

print(f'\nX sample values:\n{X[:5]}')
print(f'\nEncoded Y sample:\n{Y[:5]}')
```

### Step 2: Create training and testing datasets

```
from sklearn.model_selection import train_test_split

# Split data into train and test with a 70%/30% proportion
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.30, random_state=seed)

print(f'X_train: {X_train.shape}, y_train: {y_train.shape}')
print(f'X_test : {X_test.shape}, y_test : {y_test.shape}')
```

### Step 3: Normalize the feature data so all values fall between 0 and 1

```
import pandas as pd
from sklearn import preprocessing

# Scale X features with L2 row normalization; for the non-negative iris
# measurements this keeps every value between 0 and 1
X_train = pd.DataFrame(preprocessing.normalize(X_train), columns=X_train.columns, index=X_train.index)
X_test = pd.DataFrame(preprocessing.normalize(X_test), columns=X_test.columns, index=X_test.index)

print(f'Train sample:\n{X_train.head(4)},\nShape: {X_train.shape}')
print(f'\nTest sample:\n{X_test.head(4)},\nShape: {X_test.shape}')
```

# **Phase 2: Model Building**

### Step 1: Build model

&nbsp;&nbsp;&nbsp;&nbsp;Iris classification is a multi-class problem: we need to classify whether an Iris flower is setosa, versicolor or virginica. The softmax activation function is commonly used in the output layer of multi-class classification problems; it returns a probability for each label, and we pick the label with the highest probability.<br>
&nbsp;&nbsp;&nbsp;&nbsp;The **tf.keras.Sequential** model is a linear stack of layers. Its constructor takes a list of layer instances, in this case, one tf.keras.layers.Dense layer with 8 nodes, two layers with 10 nodes, and an output layer with 3 nodes representing our label predictions. The first layer's input_shape parameter corresponds to the number of features in the dataset, which equals 4.<br>
&nbsp;&nbsp;&nbsp;&nbsp;The **activation** function determines the output of each node in the layer. These non-linearities are important; without them the model would be equivalent to a single layer. There are many tf.keras.activations, such as tanh, sigmoid or relu. In our two models we have decided to use "tanh" and "relu" and compare the performance.<br>
&nbsp;&nbsp;&nbsp;&nbsp;The ideal number of hidden layers and neurons depends on the problem and the dataset. Like many aspects of machine learning, picking the best shape of the neural network requires a mixture of knowledge and experimentation. As a rule of thumb, increasing the number of hidden layers and neurons typically creates a more powerful model, which requires more data to train effectively. For our illustration we use two models, with 3 and 4 layers respectively. Our expectation is that the model with more layers should give a better result.
``` from keras.models import Sequential from keras.layers import Dense def model_with_3_layers(): model = Sequential() model.add(Dense(27, input_dim=4, activation='relu', name='input_layer')) model.add(Dense(9, activation='relu', name='layer_1')) model.add(Dense(3, activation='softmax', name='output_layer')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) return model def model_with_4_layers(): """build the Keras model callback""" model = Sequential() model.add(Dense(8, input_dim=4, activation='tanh', name='layer_1')) model.add(Dense(10, activation='tanh', name='layer_2')) model.add(Dense(10, activation='tanh', name='layer_3')) model.add(Dense(3, activation='softmax', name='output_layer')) model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=['accuracy']) return model ``` ### Step 2: Create estimator &nbsp;&nbsp;&nbsp;&nbsp;We can also pass arguments in the construction of the KerasClassifier class that will be passed on to the fit() function internally used to train the neural network. Here, we pass the number of epochs as 200 and batch size as 20 to use when training the model. ``` from keras.wrappers.scikit_learn import KerasClassifier estimator = KerasClassifier( build_fn=model_with_4_layers, epochs=200, batch_size=20, verbose=0) ``` ### Step 3: Evaluate The Model with k-Fold Cross Validation &nbsp;&nbsp;&nbsp;&nbsp;Now, the neural network model can be evaluated on a training dataset. The scikit-learn has excellent capability to evaluate models using a suite of techniques. The gold standard for evaluating machine learning models is k-fold cross validation.Since the dataset is quite small, we can pass 5 fold for cross validation. ``` from sklearn.model_selection import KFold from sklearn.model_selection import cross_val_score import tensorflow as tf # Suppress Tensorflow warning tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) estimator = KerasClassifier( build_fn=model_with_3_layers, epochs=200, batch_size=20, verbose=0) kfold = KFold(n_splits=5, shuffle=True, random_state=seed) results = cross_val_score(estimator, X_train, y_train, cv=kfold) print(f'Model Performance:\nmean: {results.mean()*100:.2f}\ \nstd: {results.std()*100:.2f}') from sklearn.model_selection import KFold from sklearn.model_selection import cross_val_score estimator = KerasClassifier( build_fn=model_with_4_layers, epochs=200, batch_size=20, verbose=0) kfold = KFold(n_splits=5, shuffle=True, random_state=seed) results = cross_val_score(estimator, X_train, y_train, cv=kfold) print(f'Model Performance:\nmean: {results.mean()*100:.2f}\ \nstd: {results.std()*100:.2f}') ``` ### Phase 3 : Model Selection &nbsp;&nbsp;&nbsp;&nbsp; For our illustration, two models have been used. One with 3 layers and another with 4 layers. We can observe that the accuracy are almost the same, but the loss value is much lower in the model with 4 layers. It can be concluded that by adding more layers, it improves the accuracy and lost and at the same time requires more computational power. ``` md1 = model_with_3_layers() md1.fit(X_train, y_train, epochs=200, shuffle=True, # shuffle data randomly. 
        verbose=0    # verbose=0 keeps the training output silent
        )

# Validate the model with the test dataset
test_error_rate = md1.evaluate(X_test, y_test, verbose=0)
print(f'{md1.metrics_names[1]}: {test_error_rate[1]*100:.2f}')
print(f'{md1.metrics_names[0]}: {test_error_rate[0]*100:.2f}')

md2 = model_with_4_layers()
md2.fit(X_train,
        y_train,
        epochs=200,
        shuffle=True,   # shuffle data randomly
        verbose=0       # verbose=0 keeps the training output silent
        )

# Validate the model with the test dataset
test_error_rate = md2.evaluate(X_test, y_test, verbose=0)
print(f'{md2.metrics_names[1]}: {test_error_rate[1]*100:.2f}')
print(f'{md2.metrics_names[0]}: {test_error_rate[0]*100:.2f}')
```

### Step 4: Evaluate how the models perform on the test data

```
from sklearn.metrics import confusion_matrix

def evaluate_performance(actual, expected):
    """ Function accepts two lists with actual and expected labels """
    flowers = {0:'setosa', 1:'versicolor', 2:'virginica'}
    print(f'Flowers in test set: \nSetosa={y_test["setosa"].sum()}\
           \nVersicolor={y_test["versicolor"].sum()}\
           \nVirginica={y_test["virginica"].sum()}')

    for act, exp in zip(actual, expected):
        if act != exp:
            print(f'ERROR: {flowers[exp]} predicted as {flowers[act]}')

for i, model in enumerate((md1, md2), 1):
    print(f'\nEVALUATION OF MODEL {i}')
    predicted_targets = model.predict_classes(X_test)
    true_targets = encoder.inverse_transform(y_test.values)
    evaluate_performance(predicted_targets, true_targets)

    # Calculate the confusion matrix using sklearn.metrics
    fig, ax = plt.subplots(1, 1)
    conf_matrix = confusion_matrix(true_targets, predicted_targets)
    sns.heatmap(conf_matrix, annot=True, cmap='Blues',
                xticklabels=target_names, yticklabels=target_names)
    print('\n')
```

&nbsp;&nbsp;&nbsp;&nbsp;From the confusion matrix above we can see that the second model, with 4 layers, outperformed the model with 3 layers; its prediction was wrong only once, for the versicolor species.

___

## **4. Find a publication or report that uses this same data set and compare its methodology and results to what you did**

&nbsp;&nbsp;&nbsp;&nbsp;In this last part, we analyze the approach suggested by TensorFlow in the "Custom training: walkthrough" tutorial [1]. The same deep learning framework has been used. In the walkthrough, features and labels are stored in tf.Tensor structures, whereas in our model all data is stored in pandas DataFrames. Their label data is converted to an integer representation, whereas we decided to use a one-hot (binary) representation. The authors decided not to normalize the feature data, i.e. not to map it into the range from 0 to 1; normalizing, as we did, is generally preferable because it lets the model learn faster.<br>
&nbsp;&nbsp;&nbsp;&nbsp;The suggested model uses a Sequential model, which is a linear stack of layers. The stack is built with an input layer, two Dense layers with 10 nodes each, and an output layer, which can be represented simply as 4/10/10/3. Our better-performing model (the one with better accuracy and loss) contains one extra hidden layer and can be represented as 4/8/10/10/3. The relu activation function has been chosen for the inner layers; it outputs 0 if the input is negative or 0, and returns the value unchanged when it is positive. Both models use a categorical cross-entropy loss (the walkthrough's **SparseCategoricalCrossentropy** works on integer labels, while ours works on one-hot labels), which takes the model's class probability predictions and the desired labels, and returns the average loss across all examples.
To minimize the loss, the walkthrough uses the stochastic gradient descent (SGD) algorithm with a learning rate of 0.01; in contrast, our model is built with Adam, which is an extension of the stochastic gradient descent algorithm (a short sketch contrasting the two compile setups appears at the end of this notebook).<br>
&nbsp;&nbsp;&nbsp;&nbsp;Both models are run with almost the same number of epochs, and it can be observed that both return almost the same accuracy and loss.
&nbsp;&nbsp;&nbsp;&nbsp;To summarize, both models performed similarly; in our approach a comparable result was achieved by adding an extra inner layer, which helps the model somewhat but can be more resource-intensive.<br>

**References:**<br>
[1] The TensorFlow Authors, "Custom training: walkthrough", https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/customization/custom_training_walkthrough.ipynb#scrollTo=rwxGnsA92emp, accessed 2018.

```
# To convert colab notebook to pdf
!apt-get install texlive texlive-xetex texlive-latex-extra pandoc >/dev/null
!pip install pypandoc >/dev/null

from google.colab import drive
drive.mount('/content/drive')

!cp drive/My\ Drive/Colab\ Notebooks/HW2.ipynb ./
!jupyter nbconvert --to PDF "HW2.ipynb" 2>/dev/null
!cp ./HW2.pdf drive/My\ Drive/Colab\ Notebooks/
```
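To make the optimizer and loss comparison above concrete, here is a small hypothetical sketch (not taken from either notebook) of the two compile-time setups being contrasted: the walkthrough's SGD with SparseCategoricalCrossentropy on integer labels, versus this notebook's Adam with categorical_crossentropy on one-hot labels.

```
# Hypothetical sketch: two ways to compile the same architecture.
import tensorflow as tf

def build_compiled(loss, optimizer):
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(10, activation='relu', input_shape=(4,)),
        tf.keras.layers.Dense(10, activation='relu'),
        tf.keras.layers.Dense(3, activation='softmax'),
    ])
    model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])
    return model

# walkthrough-style: integer labels (0, 1, 2), SGD with learning rate 0.01
sgd_model = build_compiled(tf.keras.losses.SparseCategoricalCrossentropy(),
                           tf.keras.optimizers.SGD(learning_rate=0.01))

# this notebook's style: one-hot labels, Adam optimizer
adam_model = build_compiled('categorical_crossentropy', 'adam')
```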
``` import numpy as np import xarray as xr import math import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import os import f90nml from salishsea_tools import metric_tools_5x5 as met %matplotlib inline plt.rcParams['image.cmap'] = 'jet' plt.rc('xtick', labelsize=20) plt.rc('ytick', labelsize=20) reference_namelist_file = '/data/jpetrie/MEOPAR/SS-run-sets/SS-SMELT/namelists/namelist_pisces_cfg_5x5_NewIC' reference_bio_params = f90nml.read(reference_namelist_file) tracer_file = 'SS5x5_1h_20150201_20150501_ptrc_T.nc' param_metrics = pd.DataFrame() batch_directories = [ #'/data/jpetrie/MEOPAR/SalishSea/results/nampiszoo_june_14/', '/data/jpetrie/MEOPAR/SalishSea/results/nampisopt_june_14/', #'/data/jpetrie/MEOPAR/SalishSea/results/nampismes_june_14/', #'/data/jpetrie/MEOPAR/SalishSea/results/nampissink_june_17/', #'/data/jpetrie/MEOPAR/SalishSea/results/nampisprod_june_16/', #'/data/jpetrie/MEOPAR/SalishSea/results/nampismort_june_17/', #'/data/jpetrie/MEOPAR/SalishSea/results/nampisrem_june_17/', #'/data/jpetrie/MEOPAR/SalishSea/results/nampismezo_june_20/', ] metric_func_list = [ met.mean_NH4_at_depth, met.mean_NO3_at_depth, met.mean_DON_at_depth, met.mean_PON_at_depth, met.time_of_peak_PHY2, met.time_surface_NO3_drops_below_4, met.peak_3_day_biomass, ] for batch_dir in batch_directories: for file in os.listdir(batch_dir): if os.path.isfile(batch_dir + '/' + file + '/' + tracer_file) and 'zz_frac_waste' not in file: last_underscore = file.rfind('_') first_underscore = file.find('_') param_section = file[:first_underscore] param_name = file[(first_underscore+1):last_underscore] param_val = float(file[(last_underscore+1):]) param_desc = param_section + ' ' + param_name original_val = reference_bio_params[param_section][param_name] if type(original_val) is list: param_scale = round(param_val/original_val[0], 3) else: param_scale = round(param_val/original_val, 3) grid_t = xr.open_dataset(batch_dir + '/' + file +'/' + tracer_file) for metric_func in metric_func_list: metric_val = metric_func(grid_t) metric_name = metric_func.__name__ # inefficient to keep appending, but much less expensive than other parts of the loop so it doesn't matter param_metrics = param_metrics.append(pd.DataFrame({"PARAM_SECTION":[param_section], "PARAM_NAME":[param_name], "PARAM_DESC":[param_desc], "PARAM_VAL":[param_val], "PARAM_SCALE":[param_scale], "METRIC_NAME":[metric_name], "METRIC_VAL": [metric_val]})) sns.set(font_scale = 2) plt.rcParams['image.cmap'] = 'jet' param_metrics = param_metrics.sort_values(["PARAM_SCALE", "PARAM_DESC"]) fg = sns.FacetGrid(data=param_metrics.query("PARAM_SCALE < 10"), col = "METRIC_NAME", hue = "PARAM_DESC", sharex=False, sharey=False, col_wrap = 2, size = 10) fg.map(plt.scatter, "PARAM_SCALE", "METRIC_VAL", s = 80) fg.map(plt.plot, "PARAM_SCALE", "METRIC_VAL") fg.add_legend() fg.set_titles("{col_name}") param_metrics["PARAM_SCALE"] = "PARAM_SCALE_" + param_metrics["PARAM_SCALE"].astype(str) wide_format_metrics = pd.pivot_table(param_metrics, values='METRIC_VAL', index=['PARAM_SECTION','PARAM_NAME', 'PARAM_DESC', 'METRIC_NAME'], columns=['PARAM_SCALE']) wide_format_metrics.reset_index(inplace=True) wide_format_metrics["SLOPE"] = (wide_format_metrics["PARAM_SCALE_1.1"] - wide_format_metrics["PARAM_SCALE_0.9"])/0.2 wide_format_metrics cmap = plt.get_cmap('Set2') for metric_name in np.unique(wide_format_metrics["METRIC_NAME"]): x = wide_format_metrics.query("METRIC_NAME == @metric_name") categories = np.unique(x["PARAM_SECTION"]) colors = np.linspace(0, 1, 
len(categories)) colordict = dict(zip(categories, colors)) x["COLOR"] = x["PARAM_SECTION"].apply(lambda x: colordict[x]) x.plot.barh("PARAM_DESC", "SLOPE", figsize = (7,0.6*len(x["SLOPE"])), color = cmap(x.COLOR), title = metric_name) ```
# Plagiarism Detection, Feature Engineering In this project, you will be tasked with building a plagiarism detector that examines an answer text file and performs binary classification; labeling that file as either plagiarized or not, depending on how similar that text file is to a provided, source text. Your first task will be to create some features that can then be used to train a classification model. This task will be broken down into a few discrete steps: * Clean and pre-process the data. * Define features for comparing the similarity of an answer text and a source text, and extract similarity features. * Select "good" features, by analyzing the correlations between different features. * Create train/test `.csv` files that hold the relevant features and class labels for train/test data points. In the _next_ notebook, Notebook 3, you'll use the features and `.csv` files you create in _this_ notebook to train a binary classification model in a SageMaker notebook instance. You'll be defining a few different similarity features, as outlined in [this paper](https://s3.amazonaws.com/video.udacity-data.com/topher/2019/January/5c412841_developing-a-corpus-of-plagiarised-short-answers/developing-a-corpus-of-plagiarised-short-answers.pdf), which should help you build a robust plagiarism detector! To complete this notebook, you'll have to complete all given exercises and answer all the questions in this notebook. > All your tasks will be clearly labeled **EXERCISE** and questions as **QUESTION**. It will be up to you to decide on the features to include in your final training and test data. --- ## Read in the Data The cell below will download the necessary, project data and extract the files into the folder `data/`. This data is a slightly modified version of a dataset created by Paul Clough (Information Studies) and Mark Stevenson (Computer Science), at the University of Sheffield. You can read all about the data collection and corpus, at [their university webpage](https://ir.shef.ac.uk/cloughie/resources/plagiarism_corpus.html). > **Citation for data**: Clough, P. and Stevenson, M. Developing A Corpus of Plagiarised Short Answers, Language Resources and Evaluation: Special Issue on Plagiarism and Authorship Analysis, In Press. [Download] ``` # NOTE: # you only need to run this cell if you have not yet downloaded the data # otherwise you may skip this cell or comment it out #!wget https://s3.amazonaws.com/video.udacity-data.com/topher/2019/January/5c4147f9_data/data.zip #!unzip data # import libraries import pandas as pd import numpy as np import os ``` This plagiarism dataset is made of multiple text files; each of these files has characteristics that are is summarized in a `.csv` file named `file_information.csv`, which we can read in using `pandas`. ``` csv_file = 'data/file_information.csv' plagiarism_df = pd.read_csv(csv_file) # print out the first few rows of data info plagiarism_df.head() ``` ## Types of Plagiarism Each text file is associated with one **Task** (task A-E) and one **Category** of plagiarism, which you can see in the above DataFrame. ### Tasks, A-E Each text file contains an answer to one short question; these questions are labeled as tasks A-E. For example, Task A asks the question: "What is inheritance in object oriented programming?" ### Categories of plagiarism Each text file has an associated plagiarism label/category: **1. Plagiarized categories: `cut`, `light`, and `heavy`.** * These categories represent different levels of plagiarized answer texts. 
`cut` answers copy directly from a source text, `light` answers are based on the source text but include some light rephrasing, and `heavy` answers are based on the source text, but *heavily* rephrased (and will likely be the most challenging kind of plagiarism to detect). **2. Non-plagiarized category: `non`.** * `non` indicates that an answer is not plagiarized; the Wikipedia source text is not used to create this answer. **3. Special, source text category: `orig`.** * This is a specific category for the original, Wikipedia source text. We will use these files only for comparison purposes. --- ## Pre-Process the Data In the next few cells, you'll be tasked with creating a new DataFrame of desired information about all of the files in the `data/` directory. This will prepare the data for feature extraction and for training a binary, plagiarism classifier. ### EXERCISE: Convert categorical to numerical data You'll notice that the `Category` column in the data, contains string or categorical values, and to prepare these for feature extraction, we'll want to convert these into numerical values. Additionally, our goal is to create a binary classifier and so we'll need a binary class label that indicates whether an answer text is plagiarized (1) or not (0). Complete the below function `numerical_dataframe` that reads in a `file_information.csv` file by name, and returns a *new* DataFrame with a numerical `Category` column and a new `Class` column that labels each answer as plagiarized or not. Your function should return a new DataFrame with the following properties: * 4 columns: `File`, `Task`, `Category`, `Class`. The `File` and `Task` columns can remain unchanged from the original `.csv` file. * Convert all `Category` labels to numerical labels according to the following rules (a higher value indicates a higher degree of plagiarism): * 0 = `non` * 1 = `heavy` * 2 = `light` * 3 = `cut` * -1 = `orig`, this is a special value that indicates an original file. * For the new `Class` column * Any answer text that is not plagiarized (`non`) should have the class label `0`. * Any plagiarized answer texts should have the class label `1`. * And any `orig` texts will have a special label `-1`. ### Expected output After running your function, you should get a DataFrame with rows that looks like the following: ``` File Task Category Class 0 g0pA_taska.txt a 0 0 1 g0pA_taskb.txt b 3 1 2 g0pA_taskc.txt c 2 1 3 g0pA_taskd.txt d 1 1 4 g0pA_taske.txt e 0 0 ... ... 99 orig_taske.txt e -1 -1 ``` ``` # Read in a csv file and return a transformed dataframe def numerical_dataframe(csv_file='data/file_information.csv'): '''Reads in a csv file which is assumed to have `File`, `Category` and `Task` columns. This function does two things: 1) converts `Category` column values to numerical values 2) Adds a new, numerical `Class` label column. The `Class` column will label plagiarized answers as 1 and non-plagiarized as 0. Source texts have a special label, -1. :param csv_file: The directory for the file_information.csv file :return: A dataframe with numerical categories and a new `Class` label column''' orig_df = pd.read_csv(csv_file) new_df = orig_df[['File', 'Task']] new_df['Category'] = [0 if x == 'non' else 1 if x == 'heavy' else 2 if x == 'light' else 3 if x == 'cut' else -1 for x in orig_df['Category']] new_df['Class'] = [0 if x == 0 else 1 if x > 0 else -1 for x in new_df['Category']] return new_df numerical_dataframe().head(100) ``` ### Test cells Below are a couple of test cells. 
The first is an informal test where you can check that your code is working as expected by calling your function and printing out the returned result. The **second** cell below is a more rigorous test cell. The goal of a cell like this is to ensure that your code is working as expected, and to form any variables that might be used in _later_ tests/code, in this case, the data frame, `transformed_df`. > The cells in this notebook should be run in chronological order (the order they appear in the notebook). This is especially important for test cells. Often, later cells rely on the functions, imports, or variables defined in earlier cells. For example, some tests rely on previous tests to work. These tests do not test all cases, but they are a great way to check that you are on the right track! ``` # informal testing, print out the results of a called function # create new `transformed_df` transformed_df = numerical_dataframe(csv_file ='data/file_information.csv') # check work # check that all categories of plagiarism have a class label = 1 transformed_df.head(20) # test cell that creates `transformed_df`, if tests are passed """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ # importing tests import problem_unittests as tests # test numerical_dataframe function tests.test_numerical_df(numerical_dataframe) # if above test is passed, create NEW `transformed_df` transformed_df = numerical_dataframe(csv_file ='data/file_information.csv') # check work print('\nExample data: ') transformed_df.head() ``` ## Text Processing & Splitting Data Recall that the goal of this project is to build a plagiarism classifier. At it's heart, this task is a comparison text; one that looks at a given answer and a source text, compares them and predicts whether an answer has plagiarized from the source. To effectively do this comparison, and train a classifier we'll need to do a few more things: pre-process all of our text data and prepare the text files (in this case, the 95 answer files and 5 original source files) to be easily compared, and split our data into a `train` and `test` set that can be used to train a classifier and evaluate it, respectively. To this end, you've been provided code that adds additional information to your `transformed_df` from above. The next two cells need not be changed; they add two additional columns to the `transformed_df`: 1. A `Text` column; this holds all the lowercase text for a `File`, with extraneous punctuation removed. 2. A `Datatype` column; this is a string value `train`, `test`, or `orig` that labels a data point as part of our train or test set The details of how these additional columns are created can be found in the `helpers.py` file in the project directory. You're encouraged to read through that file to see exactly how text is processed and how data is split. Run the cells below to get a `complete_df` that has all the information you need to proceed with plagiarism detection and feature engineering. 
``` """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ import helpers # create a text column text_df = helpers.create_text_column(transformed_df) text_df.head() # after running the cell above # check out the processed text for a single file, by row index row_idx = 0 # feel free to change this index sample_text = text_df.iloc[0]['Text'] print('Sample processed text:\n\n', sample_text) ``` ## Split data into training and test sets The next cell will add a `Datatype` column to a given DataFrame to indicate if the record is: * `train` - Training data, for model training. * `test` - Testing data, for model evaluation. * `orig` - The task's original answer from wikipedia. ### Stratified sampling The given code uses a helper function which you can view in the `helpers.py` file in the main project directory. This implements [stratified random sampling](https://en.wikipedia.org/wiki/Stratified_sampling) to randomly split data by task & plagiarism amount. Stratified sampling ensures that we get training and test data that is fairly evenly distributed across task & plagiarism combinations. Approximately 26% of the data is held out for testing and 74% of the data is used for training. The function **train_test_dataframe** takes in a DataFrame that it assumes has `Task` and `Category` columns, and, returns a modified frame that indicates which `Datatype` (train, test, or orig) a file falls into. This sampling will change slightly based on a passed in *random_seed*. Due to a small sample size, this stratified random sampling will provide more stable results for a binary plagiarism classifier. Stability here is smaller *variance* in the accuracy of classifier, given a random seed. ``` random_seed = 1 # can change; set for reproducibility """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ import helpers # create new df with Datatype (train, test, orig) column # pass in `text_df` from above to create a complete dataframe, with all the information you need complete_df = helpers.train_test_dataframe(text_df, random_seed=random_seed) # check results complete_df.head() ``` # Determining Plagiarism Now that you've prepared this data and created a `complete_df` of information, including the text and class associated with each file, you can move on to the task of extracting similarity features that will be useful for plagiarism classification. > Note: The following code exercises, assume that the `complete_df` as it exists now, will **not** have its existing columns modified. The `complete_df` should always include the columns: `['File', 'Task', 'Category', 'Class', 'Text', 'Datatype']`. You can add additional columns, and you can create any new DataFrames you need by copying the parts of the `complete_df` as long as you do not modify the existing values, directly. --- # Similarity Features One of the ways we might go about detecting plagiarism, is by computing **similarity features** that measure how similar a given answer text is as compared to the original wikipedia source text (for a specific task, a-e). The similarity features you will use are informed by [this paper on plagiarism detection](https://s3.amazonaws.com/video.udacity-data.com/topher/2019/January/5c412841_developing-a-corpus-of-plagiarised-short-answers/developing-a-corpus-of-plagiarised-short-answers.pdf). > In this paper, researchers created features called **containment** and **longest common subsequence**. 
Using these features as input, you will train a model to distinguish between plagiarized and not-plagiarized text files. ## Feature Engineering Let's talk a bit more about the features we want to include in a plagiarism detection model and how to calculate such features. In the following explanations, I'll refer to a submitted text file as a **Student Answer Text (A)** and the original, wikipedia source file (that we want to compare that answer to) as the **Wikipedia Source Text (S)**. ### Containment Your first task will be to create **containment features**. To understand containment, let's first revisit a definition of [n-grams](https://en.wikipedia.org/wiki/N-gram). An *n-gram* is a sequential word grouping. For example, in a line like "bayes rule gives us a way to combine prior knowledge with new information," a 1-gram is just one word, like "bayes." A 2-gram might be "bayes rule" and a 3-gram might be "combine prior knowledge." > Containment is defined as the **intersection** of the n-gram word count of the Wikipedia Source Text (S) with the n-gram word count of the Student Answer Text (S) *divided* by the n-gram word count of the Student Answer Text. $$ \frac{\sum{count(\text{ngram}_{A}) \cap count(\text{ngram}_{S})}}{\sum{count(\text{ngram}_{A})}} $$ If the two texts have no n-grams in common, the containment will be 0, but if _all_ their n-grams intersect then the containment will be 1. Intuitively, you can see how having longer n-gram's in common, might be an indication of cut-and-paste plagiarism. In this project, it will be up to you to decide on the appropriate `n` or several `n`'s to use in your final model. ### EXERCISE: Create containment features Given the `complete_df` that you've created, you should have all the information you need to compare any Student Answer Text (A) with its appropriate Wikipedia Source Text (S). An answer for task A should be compared to the source text for task A, just as answers to tasks B, C, D, and E should be compared to the corresponding original source text. In this exercise, you'll complete the function, `calculate_containment` which calculates containment based upon the following parameters: * A given DataFrame, `df` (which is assumed to be the `complete_df` from above) * An `answer_filename`, such as 'g0pB_taskd.txt' * An n-gram length, `n` ### Containment calculation The general steps to complete this function are as follows: 1. From *all* of the text files in a given `df`, create an array of n-gram counts; it is suggested that you use a [CountVectorizer](https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html) for this purpose. 2. Get the processed answer and source texts for the given `answer_filename`. 3. Calculate the containment between an answer and source text according to the following equation. >$$ \frac{\sum{count(\text{ngram}_{A}) \cap count(\text{ngram}_{S})}}{\sum{count(\text{ngram}_{A})}} $$ 4. Return that containment value. You are encouraged to write any helper functions that you need to complete the function below. 
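Before writing the function, it may help to see the formula applied to a tiny, made-up pair of strings. The short illustration below is not part of the project code; it simply mirrors the definition above using 2-grams.

```
# Tiny worked example of containment (illustration only; the strings are made up).
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer

answer = "pagerank is a link analysis algorithm used by google"
source = "pagerank is a link analysis algorithm used by the google search engine"

vect = CountVectorizer(analyzer='word', ngram_range=(2, 2))   # 2-gram counts
counts = vect.fit_transform([answer, source]).toarray()

# intersection of counts = element-wise minimum, normalized by the answer's n-gram count
containment = np.minimum(counts[0], counts[1]).sum() / counts[0].sum()
print(containment)
```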
``` complete_df[complete_df['File'] == 'g0pA_taska.txt'].iloc[0]['Text'] 'g0pA_taska.txt'.replace('g0pA','orig') s = 'g0pA_taska.txt' 'orig' + s[4:] from sklearn.feature_extraction.text import CountVectorizer def get_texts(df, filename): answer = df[df['File'] == filename].iloc[0]['Text'] orig_filename = 'orig' + filename[4:] orig = df[df['File'] == orig_filename].iloc[0]['Text'] #print(filename) #print(orig_filename) return answer, orig # Calculate the ngram containment for one answer file/source file pair in a df def calculate_containment(df, n, answer_filename): '''Calculates the containment between a given answer text and its associated source text. This function creates a count of ngrams (of a size, n) for each text file in our data. Then calculates the containment by finding the ngram count for a given answer text, and its associated source text, and calculating the normalized intersection of those counts. :param df: A dataframe with columns, 'File', 'Task', 'Category', 'Class', 'Text', and 'Datatype' :param n: An integer that defines the ngram size :param answer_filename: A filename for an answer text in the df, ex. 'g0pB_taskd.txt' :return: A single containment value that represents the similarity between an answer text and its source text. ''' a_text, s_text = get_texts(df, answer_filename) # instantiate an ngram counter counts = CountVectorizer(analyzer='word', ngram_range=(n,n)) ngrams = counts.fit_transform([a_text, s_text]) ngram_array = ngrams.toarray() return sum(np.amin(ngram_array,axis=0))/sum(ngram_array[0]) ``` ### Test cells After you've implemented the containment function, you can test out its behavior. The cell below iterates through the first few files, and calculates the original category _and_ containment values for a specified n and file. >If you've implemented this correctly, you should see that the non-plagiarized have low or close to 0 containment values and that plagiarized examples have higher containment values, closer to 1. Note what happens when you change the value of n. I recommend applying your code to multiple files and comparing the resultant containment values. You should see that the highest containment values correspond to files with the highest category (`cut`) of plagiarism level. ``` # select a value for n n = 1 # indices for first few files test_indices = range(4) # iterate through files and calculate containment category_vals = [] containment_vals = [] for i in test_indices: # get level of plagiarism for a given file index category_vals.append(complete_df.loc[i, 'Category']) # calculate containment for given file and n filename = complete_df.loc[i, 'File'] print(filename) c = calculate_containment(complete_df, n, filename) containment_vals.append(c) # print out result, does it make sense? print('Original category values: \n', category_vals) print() print(str(n)+'-gram containment values: \n', containment_vals) ngram_1 = [0.39814814814814814, 1.0, 0.86936936936936937, 0.5935828877005348] print('Expected values: \n', ngram_1) assert all(np.isclose(containment_vals, ngram_1, rtol=1e-04)), \ 'n=1 calculations are incorrect. Double check the intersection calculation.' 
# run this test cell
""" DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """

# test containment calculation
# params: complete_df from before, and containment function
import problem_unittests as tests

tests.test_containment(complete_df, calculate_containment)
```

### QUESTION 1: Why can we calculate containment features across *all* data (training & test), prior to splitting the DataFrame for modeling? That is, what about the containment calculation means that the test and training data do not influence each other?

**Answer:** The containment value for a given answer file is computed only from that answer text and its corresponding original source text; it does not use any statistics gathered across the other answer files. Because the calculation is a per-file pre-processing step performed before any model is trained, computing it for all files at once does not let the training and test data influence each other.

---
## Longest Common Subsequence

Containment is a good way to find overlap in word usage between two documents; it may help identify cases of cut-and-paste as well as paraphrased levels of plagiarism. Since plagiarism is a fairly complex task with varying levels, it's often useful to include other measures of similarity. The paper also discusses a feature called **longest common subsequence**.

> The longest common subsequence is the longest string of words (or letters) that are *the same* between the Wikipedia Source Text (S) and the Student Answer Text (A). This value is also normalized by dividing by the total number of words (or letters) in the Student Answer Text.

In this exercise, we'll ask you to calculate the longest common subsequence of words between two texts.

### EXERCISE: Calculate the longest common subsequence

Complete the function `lcs_norm_word`; this should calculate the *longest common subsequence* of words between a Student Answer Text and corresponding Wikipedia Source Text.

It may be helpful to think of this in a concrete example. A Longest Common Subsequence (LCS) problem may look as follows:
* Given two texts: text A (answer text) of length n, and string S (original source text) of length m. Our goal is to produce their longest common subsequence of words: the longest sequence of words that appear left-to-right in both texts (though the words don't have to be in continuous order).
* Consider:
    * A = "i think pagerank is a link analysis algorithm used by google that uses a system of weights attached to each element of a hyperlinked set of documents"
    * S = "pagerank is a link analysis algorithm used by the google internet search engine that assigns a numerical weighting to each element of a hyperlinked set of documents"
* In this case, we can see that the start of each sentence is fairly similar, having overlap in the sequence of words, "pagerank is a link analysis algorithm used by" before diverging slightly. Then we **continue moving left-to-right along both texts** until we see the next common sequence; in this case it is only one word, "google". Next we find "that" and "a" and finally the same ending "to each element of a hyperlinked set of documents".
* Below is a clear visual of how these sequences were found, sequentially, in each text.

<img src='notebook_ims/common_subseq_words.png' width=40% />

* Now, those words appear in left-to-right order in each document, sequentially, and even though there are some words in between, we count this as the longest common subsequence between the two texts.
* If I count up each word that I found in common I get the value 20. **So, LCS has length 20**.
* Next, to normalize this value, divide by the total length of the student answer; in this example that length is only 27. **So, the function `lcs_norm_word` should return the value `20/27` or about `0.7408`.** In this way, LCS is a great indicator of cut-and-paste plagiarism or if someone has referenced the same source text multiple times in an answer. ### LCS, dynamic programming If you read through the scenario above, you can see that this algorithm depends on looking at two texts and comparing them word by word. You can solve this problem in multiple ways. First, it may be useful to `.split()` each text into lists of comma separated words to compare. Then, you can iterate through each word in the texts and compare them, adding to your value for LCS as you go. The method I recommend for implementing an efficient LCS algorithm is: using a matrix and dynamic programming. **Dynamic programming** is all about breaking a larger problem into a smaller set of subproblems, and building up a complete result without having to repeat any subproblems. This approach assumes that you can split up a large LCS task into a combination of smaller LCS tasks. Let's look at a simple example that compares letters: * A = "ABCD" * S = "BD" We can see right away that the longest subsequence of _letters_ here is 2 (B and D are in sequence in both strings). And we can calculate this by looking at relationships between each letter in the two strings, A and S. Here, I have a matrix with the letters of A on top and the letters of S on the left side: <img src='notebook_ims/matrix_1.png' width=40% /> This starts out as a matrix that has as many columns and rows as letters in the strings S and O **+1** additional row and column, filled with zeros on the top and left sides. So, in this case, instead of a 2x4 matrix it is a 3x5. Now, we can fill this matrix up by breaking it into smaller LCS problems. For example, let's first look at the shortest substrings: the starting letter of A and S. We'll first ask, what is the Longest Common Subsequence between these two letters "A" and "B"? **Here, the answer is zero and we fill in the corresponding grid cell with that value.** <img src='notebook_ims/matrix_2.png' width=30% /> Then, we ask the next question, what is the LCS between "AB" and "B"? **Here, we have a match, and can fill in the appropriate value 1**. <img src='notebook_ims/matrix_3_match.png' width=25% /> If we continue, we get to a final matrix that looks as follows, with a **2** in the bottom right corner. <img src='notebook_ims/matrix_6_complete.png' width=25% /> The final LCS will be that value **2** *normalized* by the number of n-grams in A. So, our normalized value is 2/4 = **0.5**. ### The matrix rules One thing to notice here is that, you can efficiently fill up this matrix one cell at a time. Each grid cell only depends on the values in the grid cells that are directly on top and to the left of it, or on the diagonal/top-left. The rules are as follows: * Start with a matrix that has one extra row and column of zeros. * As you traverse your string: * If there is a match, fill that grid cell with the value to the top-left of that cell *plus* one. So, in our case, when we found a matching B-B, we added +1 to the value in the top-left of the matching cell, 0. * If there is not a match, take the *maximum* value from either directly to the left or the top cell, and carry that value over to the non-match cell. 
<img src='notebook_ims/matrix_rules.png' width=50% /> After completely filling the matrix, **the bottom-right cell will hold the non-normalized LCS value**. This matrix treatment can be applied to a set of words instead of letters. Your function should apply this to the words in two texts and return the normalized LCS value. ``` ss = "aas" for i in range(1,len(ss)+1): print(ss[i-1]) d = np.zeros((2, 2)) d[0][0] = 1 d import re def clean_text(sentence): return [re.sub(r'\W+', '', c) for c in sentence.split()] # Compute the normalized LCS given an answer text and a source text def lcs_norm_word(answer_text, source_text): '''Computes the longest common subsequence of words in two texts; returns a normalized value. :param answer_text: The pre-processed text for an answer text :param source_text: The pre-processed text for an answer's associated source text :return: A normalized LCS value''' answer_words = clean_text(answer_text) source_words = clean_text(source_text) lcs = 0 la = len(answer_words) ls = len(source_words) table = np.zeros((la+1, ls+1)) for i in range(1,la+1): for j in range(1,ls+1): o = max(table[i-1][j], table[i][j-1]) if (answer_words[i-1] == source_words[j-1]): o = table[i-1][j-1] + 1 table[i][j] = o lcs = o return lcs/la ``` ### Test cells Let's start by testing out your code on the example given in the initial description. In the below cell, we have specified strings A (answer text) and S (original source text). We know that these texts have 20 words in common and the submitted answer is 27 words long, so the normalized, longest common subsequence should be 20/27. ``` # Run the test scenario from above # does your function return the expected value? A = "i think pagerank is a link analysis algorithm used by google that uses a system of weights attached to each element of a hyperlinked set of documents" S = "pagerank is a link analysis algorithm used by the google internet search engine that assigns a numerical weighting to each element of a hyperlinked set of documents" # calculate LCS lcs = lcs_norm_word(A, S) print('LCS = ', lcs) # expected value test assert lcs==20/27., "Incorrect LCS value, expected about 0.7408, got "+str(lcs) print('Test passed!') ``` This next cell runs a more rigorous test. ``` # run test cell """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ # test lcs implementation # params: complete_df from before, and lcs_norm_word function tests.test_lcs(complete_df, lcs_norm_word) ``` Finally, take a look at a few resultant values for `lcs_norm_word`. Just like before, you should see that higher values correspond to higher levels of plagiarism. ``` # test on your own test_indices = range(5) # look at first few files category_vals = [] lcs_norm_vals = [] # iterate through first few docs and calculate LCS for i in test_indices: category_vals.append(complete_df.loc[i, 'Category']) # get texts to compare answer_text = complete_df.loc[i, 'Text'] task = complete_df.loc[i, 'Task'] # we know that source texts have Class = -1 orig_rows = complete_df[(complete_df['Class'] == -1)] orig_row = orig_rows[(orig_rows['Task'] == task)] source_text = orig_row['Text'].values[0] # calculate lcs lcs_val = lcs_norm_word(answer_text, source_text) lcs_norm_vals.append(lcs_val) # print out result, does it make sense? 
print('Original category values: \n', category_vals) print() print('Normalized LCS values: \n', lcs_norm_vals) ``` --- # Create All Features Now that you've completed the feature calculation functions, it's time to actually create multiple features and decide on which ones to use in your final model! In the below cells, you're provided two helper functions to help you create multiple features and store those in a DataFrame, `features_df`. ### Creating multiple containment features Your completed `calculate_containment` function will be called in the next cell, which defines the helper function `create_containment_features`. > This function returns a list of containment features, calculated for a given `n` and for *all* files in a df (assumed to the the `complete_df`). For our original files, the containment value is set to a special value, -1. This function gives you the ability to easily create several containment features, of different n-gram lengths, for each of our text files. ``` """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ # Function returns a list of containment features, calculated for a given n # Should return a list of length 100 for all files in a complete_df def create_containment_features(df, n, column_name=None): containment_values = [] if(column_name==None): column_name = 'c_'+str(n) # c_1, c_2, .. c_n # iterates through dataframe rows for i in df.index: file = df.loc[i, 'File'] # Computes features using calculate_containment function if df.loc[i,'Category'] > -1: c = calculate_containment(df, n, file) containment_values.append(c) # Sets value to -1 for original tasks else: containment_values.append(-1) print(str(n)+'-gram containment features created!') return containment_values ``` ### Creating LCS features Below, your complete `lcs_norm_word` function is used to create a list of LCS features for all the answer files in a given DataFrame (again, this assumes you are passing in the `complete_df`. It assigns a special value for our original, source files, -1. ``` """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ # Function creates lcs feature and add it to the dataframe def create_lcs_features(df, column_name='lcs_word'): lcs_values = [] # iterate through files in dataframe for i in df.index: # Computes LCS_norm words feature using function above for answer tasks if df.loc[i,'Category'] > -1: # get texts to compare answer_text = df.loc[i, 'Text'] task = df.loc[i, 'Task'] # we know that source texts have Class = -1 orig_rows = df[(df['Class'] == -1)] orig_row = orig_rows[(orig_rows['Task'] == task)] source_text = orig_row['Text'].values[0] # calculate lcs lcs = lcs_norm_word(answer_text, source_text) lcs_values.append(lcs) # Sets to -1 for original tasks else: lcs_values.append(-1) print('LCS features created!') return lcs_values ``` ## EXERCISE: Create a features DataFrame by selecting an `ngram_range` The paper suggests calculating the following features: containment *1-gram to 5-gram* and *longest common subsequence*. > In this exercise, you can choose to create even more features, for example from *1-gram to 7-gram* containment features and *longest common subsequence*. You'll want to create at least 6 features to choose from as you think about which to give to your final, classification model. Defining and comparing at least 6 different features allows you to discard any features that seem redundant, and choose to use the best features for your final model! 
In the below cell **define an n-gram range**; these will be the n's you use to create n-gram containment features. The rest of the feature creation code is provided. ``` # Define an ngram range ngram_range = range(1,7) # The following code may take a minute to run, depending on your ngram_range """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ features_list = [] # Create features in a features_df all_features = np.zeros((len(ngram_range)+1, len(complete_df))) # Calculate features for containment for ngrams in range i=0 for n in ngram_range: column_name = 'c_'+str(n) features_list.append(column_name) # create containment features all_features[i]=np.squeeze(create_containment_features(complete_df, n)) i+=1 # Calculate features for LCS_Norm Words features_list.append('lcs_word') all_features[i]= np.squeeze(create_lcs_features(complete_df)) # create a features dataframe features_df = pd.DataFrame(np.transpose(all_features), columns=features_list) # Print all features/columns print() print('Features: ', features_list) print() # print some results features_df.head(10) ``` ## Correlated Features You should use feature correlation across the *entire* dataset to determine which features are ***too*** **highly-correlated** with each other to include both features in a single model. For this analysis, you can use the *entire* dataset due to the small sample size we have. All of our features try to measure the similarity between two texts. Since our features are designed to measure similarity, it is expected that these features will be highly-correlated. Many classification models, for example a Naive Bayes classifier, rely on the assumption that features are *not* highly correlated; highly-correlated features may over-inflate the importance of a single feature. So, you'll want to choose your features based on which pairings have the lowest correlation. These correlation values range between 0 and 1; from low to high correlation, and are displayed in a [correlation matrix](https://www.displayr.com/what-is-a-correlation-matrix/), below. ``` """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ # Create correlation matrix for just Features to determine different models to test corr_matrix = features_df.corr().abs().round(2) # display shows all of a dataframe display(corr_matrix) ``` ## EXERCISE: Create selected train/test data Complete the `train_test_data` function below. This function should take in the following parameters: * `complete_df`: A DataFrame that contains all of our processed text data, file info, datatypes, and class labels * `features_df`: A DataFrame of all calculated features, such as containment for ngrams, n= 1-5, and lcs values for each text file listed in the `complete_df` (this was created in the above cells) * `selected_features`: A list of feature column names, ex. `['c_1', 'lcs_word']`, which will be used to select the final features in creating train/test sets of data. It should return two tuples: * `(train_x, train_y)`, selected training features and their corresponding class labels (0/1) * `(test_x, test_y)`, selected training features and their corresponding class labels (0/1) ** Note: x and y should be arrays of feature values and numerical class labels, respectively; not DataFrames.** Looking at the above correlation matrix, you should decide on a **cutoff** correlation value, less than 1.0, to determine which sets of features are *too* highly-correlated to be included in the final training and test data. 
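As one way to experiment with a cutoff, the correlation matrix computed above can be scanned for feature pairs that fall below it. This is only an illustrative sketch; the cutoff value of 0.97 is an arbitrary assumption, not a recommendation:

```
# Sketch: list feature pairs whose (absolute) correlation is below a chosen cutoff.
# Assumes corr_matrix from the cell above; the cutoff is an arbitrary value to experiment with.
cutoff = 0.97

for i, feat_a in enumerate(corr_matrix.columns):
    for feat_b in corr_matrix.columns[i + 1:]:
        if corr_matrix.loc[feat_a, feat_b] < cutoff:
            print(feat_a, feat_b, corr_matrix.loc[feat_a, feat_b])
```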
If you cannot find features that are less correlated than some cutoff value, it is suggested that you increase the number of features (longer n-grams) to choose from or use *only one or two* features in your final model to avoid introducing highly-correlated features. Recall that the `complete_df` has a `Datatype` column that indicates whether data should be `train` or `test` data; this should help you split the data appropriately. ``` # Takes in dataframes and a list of selected features (column names) # and returns (train_x, train_y), (test_x, test_y) def train_test_data(complete_df, features_df, selected_features): '''Gets selected training and test features from given dataframes, and returns tuples for training and test features and their corresponding class labels. :param complete_df: A dataframe with all of our processed text data, datatypes, and labels :param features_df: A dataframe of all computed, similarity features :param selected_features: An array of selected features that correspond to certain columns in `features_df` :return: training and test features and labels: (train_x, train_y), (test_x, test_y)''' # get the training features train_x = features_df[complete_df['Datatype'] == 'train'][selected_features].to_numpy() # And training class labels (0 or 1) train_y = complete_df[complete_df['Datatype'] == 'train']['Class'].to_numpy() # get the test features and labels test_x = features_df[complete_df['Datatype'] == 'test'][selected_features].to_numpy() test_y = complete_df[complete_df['Datatype'] == 'test']['Class'].to_numpy() return (train_x, train_y), (test_x, test_y) ``` ### Test cells Below, test out your implementation and create the final train/test data. ``` #complete_df.loc(list(features_df)[:2]) [list(features_df)[:2]] features_df[list(features_df)[:2]] features_df[complete_df['Datatype'] == 'train'][list(features_df)[:2]].to_numpy() #list(complete_df[complete_df['Datatype'] == 'train']['Class']) features_df """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ test_selection = list(features_df)[:2] # first couple columns as a test # test that the correct train/test data is created (train_x, train_y), (test_x, test_y) = train_test_data(complete_df, features_df, test_selection) # params: generated train/test data tests.test_data_split(train_x, train_y, test_x, test_y) ``` ## EXERCISE: Select "good" features If you passed the test above, you can create your own train/test data, below. Define a list of features you'd like to include in your final mode, `selected_features`; this is a list of the features names you want to include. ``` # Select your list of features, this should be column names from features_df # ex. 
['c_1', 'lcs_word'] selected_features = ['c_1', 'c_5', 'lcs_word'] """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ (train_x, train_y), (test_x, test_y) = train_test_data(complete_df, features_df, selected_features) # check that division of samples seems correct # these should add up to 95 (100 - 5 original files) print('Training size: ', len(train_x)) print('Test size: ', len(test_x)) print() print('Training df sample: \n', train_x[:10]) from numpy import cov cov(features_df['c_1'].to_numpy(), features_df['c_2'].to_numpy()) #features_df['c_1'].to_numpy() less_corr = 1 less_corr_a=0 less_corr_b=0 for i in range(1,6): for j in range(1,6): if less_corr > features_df['c_'+str(i)].corr(features_df['c_'+str(j)]): less_corr = features_df['c_'+str(i)].corr(features_df['c_'+str(j)]) less_corr_a = i less_corr_b = j print(less_corr) print(less_corr_a) print(less_corr_b) ``` ### Question 2: How did you decide on which features to include in your final model? **Answer:** I run correlation analysis between each pair of features, and result shows that c_1 and c_5 are least correlated, therefore I chose them --- ## Creating Final Data Files Now, you are almost ready to move on to training a model in SageMaker! You'll want to access your train and test data in SageMaker and upload it to S3. In this project, SageMaker will expect the following format for your train/test data: * Training and test data should be saved in one `.csv` file each, ex `train.csv` and `test.csv` * These files should have class labels in the first column and features in the rest of the columns This format follows the practice, outlined in the [SageMaker documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/cdf-training.html), which reads: "Amazon SageMaker requires that a CSV file doesn't have a header record and that the target variable [class label] is in the first column." ## EXERCISE: Create csv files Define a function that takes in x (features) and y (labels) and saves them to one `.csv` file at the path `data_dir/filename`. It may be useful to use pandas to merge your features and labels into one DataFrame and then convert that into a csv file. You can make sure to get rid of any incomplete rows, in a DataFrame, by using `dropna`. ``` fake_x = [ [0.39814815, 0.0001, 0.19178082], [0.86936937, 0.44954128, 0.84649123], [0.44086022, 0., 0.22395833] ] fake_y = [0, 1, 1] a=np.array(fake_x) b=np.array(fake_y).reshape(3,1) np.concatenate((a,b),axis=1) def make_csv(x, y, filename, data_dir): '''Merges features and labels and converts them into one csv file with labels in the first column. :param x: Data features :param y: Data labels :param file_name: Name of csv file, ex. 'train.csv' :param data_dir: The directory where files will be saved ''' # make data dir, if it does not exist if not os.path.exists(data_dir): os.makedirs(data_dir) # your code here a = np.array(x) b = np.array(y).reshape(len(y),1) c = np.concatenate((b,a),axis=1) np.savetxt(str(data_dir)+'/'+str(filename), c, delimiter=",") # nothing is returned, but a print statement indicates that the function has run print('Path created: '+str(data_dir)+'/'+str(filename)) ``` ### Test cells Test that your code produces the correct format for a `.csv` file, given some text features and labels. 
``` """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ fake_x = [ [0.39814815, 0.0001, 0.19178082], [0.86936937, 0.44954128, 0.84649123], [0.44086022, 0., 0.22395833] ] fake_y = [0, 1, 1] make_csv(fake_x, fake_y, filename='to_delete.csv', data_dir='test_csv') # read in and test dimensions fake_df = pd.read_csv('test_csv/to_delete.csv', header=None) # check shape assert fake_df.shape==(3, 4), \ 'The file should have as many rows as data_points and as many columns as features+1 (for indices).' # check that first column = labels assert np.all(fake_df.iloc[:,0].values==fake_y), 'First column is not equal to the labels, fake_y.' print('Tests passed!') # delete the test csv file, generated above ! rm -rf test_csv ``` If you've passed the tests above, run the following cell to create `train.csv` and `test.csv` files in a directory that you specify! This will save the data in a local directory. Remember the name of this directory because you will reference it again when uploading this data to S3. ``` # can change directory, if you want data_dir = 'plagiarism_data' """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ make_csv(train_x, train_y, filename='train.csv', data_dir=data_dir) make_csv(test_x, test_y, filename='test.csv', data_dir=data_dir) ``` ## Up Next Now that you've done some feature engineering and created some training and test data, you are ready to train and deploy a plagiarism classification model. The next notebook will utilize SageMaker resources to train and test a model that you design.
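As an optional sanity check before moving on (a sketch that assumes the cells above have run and that `data_dir` is still `'plagiarism_data'`), the saved training file can be read back to confirm the headerless, label-first format SageMaker expects:

```
# Read back the generated training csv: no header row, class label in column 0.
import pandas as pd

train_check = pd.read_csv('plagiarism_data/train.csv', header=None)
print(train_check.shape)    # rows = training points, columns = 1 label + number of selected features
print(train_check.head())
```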
# **Optimization - Activity 3**

![researchgate-logo.png](https://www.uninorte.edu.co/Uninorte/images/topbar_un/headerlogo_un.png)

* Student: Alejandro Jesús Manotas Marmolejo
* Student ID: 200108289

# **Question 1.**

Consider $N$ convex functions $f_i(x):\Re\Rightarrow \Re$, for $1 \le i \le N$, and show that their sum
\begin{eqnarray}
\sum_{i=1}^{N} f_i(x)\,,
\end{eqnarray}
is convex.

**Answer.**

Let $x_1, x_2 \in \Re$ and $\alpha \in [0,1]$, and define $g(x) = \sum_{i=1}^{N} f_i(x)$ and $z = \alpha x_1 + (1-\alpha)x_2$. Using the convexity of each $f_i$:

\begin{equation}
\begin{split}
g(z) & = \sum_{i=1}^{N} f_i(z)\\
& = \sum_{i=1}^{N} f_i(\alpha x_1 + (1-\alpha)x_2)\\
& \le \sum_{i=1}^{N} \left[ \alpha f_i(x_1) + (1-\alpha)f_i(x_2) \right]\\
& = \alpha \sum_{i=1}^{N} f_i(x_1) + (1-\alpha)\sum_{i=1}^{N} f_i(x_2)\\
& = \alpha g(x_1) + (1-\alpha) g(x_2)
\end{split}
\end{equation}

so $g$ satisfies the defining inequality of convexity.

---

# **Question 2**

Show that $g(x) = a\cdot f(x) + b$ is convex, where $a, b \ge 0$ and $f(x):\Re\Rightarrow \Re$ is a convex function.

**Answer.**

With $x_1, x_2 \in \Re$, $\alpha \in [0,1]$ and $z = \alpha x_1 + (1-\alpha)x_2$, the convexity of $f$ and the fact that $a \ge 0$ (which preserves the inequality) give:

\begin{equation}
\begin{split}
g(z) & = a \cdot f(\alpha x_1 + (1-\alpha)x_2) + b\\
& \le a \left[ \alpha f(x_1) + (1-\alpha) f(x_2) \right] + b\\
& = a\alpha f(x_1) + (1-\alpha)a f(x_2) + \alpha b + (1-\alpha) b\\
& = \alpha \left[ a f(x_1) + b \right] + (1-\alpha) \left[ a f(x_2) + b \right]\\
& = \alpha g(x_1) + (1-\alpha) g(x_2)
\end{split}
\end{equation}

---

# **Question 3**

Show that, if $f(x):\Re \Rightarrow \Re$ is convex, then $g(x) = f(a\cdot x + b)$ is also convex, where $a, b \ge 0$.

**Answer.**

With $x_1, x_2 \in \Re$, $\alpha \in [0,1]$ and $z = \alpha x_1 + (1-\alpha)x_2$, note that $\alpha b + (1-\alpha)b = b$, so the argument of $f$ can be written as a convex combination:

\begin{equation}
\begin{split}
g(z) & = f(a(\alpha x_1 + (1-\alpha)x_2) + b)\\
& = f(\alpha (a x_1 + b) + (1-\alpha)(a x_2 + b))\\
& \le \alpha f(a x_1 + b) + (1-\alpha) f(a x_2 + b)\\
& = \alpha g(x_1) + (1-\alpha) g(x_2)
\end{split}
\end{equation}

---

**Remember:** Plagiarism will not be tolerated in this course. Without exception, should such a situation arise, the students involved will be subject to a formal investigation, conducted in accordance with the Student Regulations of Universidad del Norte.
Plagiarism includes: using content without proper citation, either verbatim or with minimal changes that do not alter the spirit of the text/code; acquiring, intentionally or not, third-party work and presenting it partially or fully as one's own; submitting group work in which one of the members did not contribute, or in which no demonstrable teamwork took place; among other situations defined in the academic fraud guide of Universidad del Norte: (https://guayacan.uninorte.edu.co/normatividad_interna/upload/File/Guia_Prevencion_Fraude%20estudiantes(5).pdf )

**Isaiah 40:31.** But those who wait upon the LORD shall renew their strength; they shall mount up with wings as eagles; they shall run and not be weary; they shall walk and not faint.
... ***CURRENTLY UNDER DEVELOPMENT*** ... ## Obtain synthetic waves and water level timeseries under a climate change scenario (future AWTs occurrence probability) inputs required: * Historical DWTs (for plotting) * Historical wave families (for plotting) * Synthetic DWTs climate change * Historical intradaily hydrograph parameters * TCs waves * Fitted multivariate extreme model for the waves associated to each DWT in this notebook: * Generate synthetic time series of wave conditions ``` #!/usr/bin/env python # -*- coding: utf-8 -*- # common import os import os.path as op # pip import numpy as np import xarray as xr import pandas as pd from datetime import datetime import matplotlib.pyplot as plt # DEV: override installed teslakit import sys sys.path.insert(0, op.join(os.path.abspath(''), '..', '..','..', '..')) # teslakit from teslakit.database import Database from teslakit.climate_emulator import Climate_Emulator from teslakit.waves import AWL, Aggregate_WavesFamilies from teslakit.plotting.outputs import Plot_FitSim_Histograms from teslakit.plotting.extremes import Plot_FitSim_AnnualMax, Plot_FitSim_GevFit, Plot_Fit_QQ from teslakit.plotting.waves import Plot_Waves_Histogram_FitSim ``` ## Database and Site parameters ``` # -------------------------------------- # Teslakit database p_data = r'/Users/anacrueda/Documents/Proyectos/TESLA/data' # offshore db = Database(p_data) db.SetSite('ROI') # climate change - S5 db_S5 = Database(p_data) db_S5.SetSite('ROI_CC_S5') # climate emulator simulation modified path p_S5_CE_sims = op.join(db_S5.paths.site.EXTREMES.climate_emulator, 'Simulations') # -------------------------------------- # Load data for climate emulator simulation climate change: ESTELA DWT and TCs (MU, TAU) DWTs_sim = db_S5.Load_ESTELA_DWT_sim() # DWTs climate change TCs_params = db.Load_TCs_r2_sim_params() # TCs parameters (copula generated) TCs_RBFs = db.Load_TCs_sim_r2_rbf_output() # TCs numerical_IH-RBFs_interpolation output probs_TCs = db.Load_TCs_probs_synth() # TCs synthetic probabilities pchange_TCs = probs_TCs['category_change_cumsum'].values[:] l_mutau_wt = db.Load_MU_TAU_hydrograms() # MU - TAU intradaily hidrographs for each DWT MU_WT = np.array([x.MU.values[:] for x in l_mutau_wt]) # MU and TAU numpy arrays TAU_WT = np.array([x.TAU.values[:] for x in l_mutau_wt]) # solve first 10 DWTs simulations DWTs_sim = DWTs_sim.isel(n_sim=slice(0, 10)) #DWTs_sim = DWTs_sim.isel(time=slice(0,365*40+10), n_sim=slice(0,1)) print(DWTs_sim) ``` ## Climate Emulator - Simulation ``` # -------------------------------------- # Climate Emulator extremes model fitting # Load Climate Emulator CE = Climate_Emulator(db.paths.site.EXTREMES.climate_emulator) CE.Load() # set a new path for S5 simulations CE.Set_Simulation_Folder(p_S5_CE_sims, copy_WAVES_noTCs = False) # climate change waves (no TCs) not simulated, DWTs have changed # optional: list variables to override distribution to empirical #CE.sim_icdf_empirical_override = ['sea_Hs_31', # 'swell_1_Hs_1','swell_1_Tp_1', # 'swell_1_Hs_2','swell_1_Tp_2',] # set simulated waves min-max filter CE.sim_waves_filter.update({ 'hs': (0, 8), 'tp': (2, 25), 'ws': (0, 0.06), }) # -------------------------------------- #  Climate Emulator simulation # each DWT series will generate a different set of waves for n in DWTs_sim.n_sim: print('- Sim: {0} -'.format(int(n)+1)) # Select DWTs simulation DWTs = DWTs_sim.sel(n_sim=n) # Simulate waves n_ce = 1 # (one CE sim. for each DWT sim.) 
    WVS_sim = CE.Simulate_Waves(DWTs, n_ce, filters={'hs':True, 'tp':True, 'ws':True})

    # Simulate TCs and update simulated waves
    TCs_sim, WVS_upd = CE.Simulate_TCs(DWTs, WVS_sim, TCs_params, TCs_RBFs, pchange_TCs, MU_WT, TAU_WT)

    # store simulation data
    CE.SaveSim(WVS_sim, TCs_sim, WVS_upd, int(n))
```
# Strings in Python ## What is a string? A "string" is a series of characters of arbitrary length. Strings are immutable - they cannot be changed once created. When you modify a string, you automatically make a copy and modify the copy. ``` s1 = 'Godzilla' print s1, s1.upper(), s1 ``` ## String literals A "literal" is essentially a string constant, already spelled out for you. Python uses either on output, but that's just for formatting simplicity. ``` "Godzilla" ``` ### Single and double quotes Generally, a string literal can be in single ('), double ("), or triple (''') quotes. Single and double quotes are equivalent - use whichever you prefer (but be consistent). If you need to have a single or double quote in your literal, surround your literal with the other type, or use the backslash to escape the quote. ``` "Godzilla's a kaiju." 'Godzilla\'s a kaiju.' 'We call him... "Godzilla".' ``` ### Triple quotes (''') Triple quotes are a special form of quoting used for documenting your Python files (docstrings). We won't discuss that type here. ### Raw strings Raw strings don't use any escape character interpretation. Use them when you have a complicated string that you don't want to clutter with lots of backslashes. Python puts them in for you. ``` print('This is a\ncomplicated string with newline escapes in it.') print(r'This is a\ncomplicated string with newline escapes in it.') ``` ## Strings and numbers ``` x=int('122', 3) x+1 ``` ### String objects String objects are just the string variables you create in Python. ``` kaiju = 'Godzilla' print(kaiju) kaiju ``` Note the print() call shows no quotes, while the simple variable name did. That is a Python output convention. Just entering the name will call the repr() method, which displays the value of the argument as Python would see it when it reads it in, not as the user wants it. ``` repr(kaiju) print(repr(kaiju)) ``` ### String operators When you read text from a file, it's just that - text. No matter what the data represents, it's still text. To use it as a number, you have to explicitly convert it to a number. ``` one = 1 two = '2' print one, two, one + two one = 1 two = int('2') print one, two, one + two num1 = 1.1 num2 = float('2.2') print num1, num2, num1 + num2 ``` You can also do this with hexadecimal and octal numbers, or any other base, for that matter. ``` print int('FF', 16) print int('0xff', 16) print int('777', 8) print int('0777', 8) print int('222', 7) print int('110111001', 2) ``` If the conversion cannot be done, an exception is thrown. ``` print int('0xGG', 16) ``` #### Concatenation ``` kaiju1 = 'Godzilla' kaiju2 = 'Mothra' kaiju1 + ' versus ' + kaiju2 ``` #### Repetition ``` 'Run away! ' * 3 ``` ### String keywords #### in() NOTE: This _particular_ statement is false regardless of how the statement is evaluated! :^) ``` 'Godzilla' in 'Godzilla vs Gamera' ``` ### String functions #### len() ``` len(kaiju) ``` ### String methods Remember - methods are functions attached to objects, accessed via the 'dot' notation. 
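Because string methods return new strings (the originals are immutable), calls can also be chained through the dot notation. A quick illustrative example, made up in the same Godzilla spirit as the rest of this notebook:

```
'  godzilla, king of the monsters  '.strip().title().center(40, '*')
```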
#### Basic formatting and manipulation ##### capitalize()/lower()/upper()/swapcase()/title() ``` kaiju.capitalize() kaiju.lower() kaiju.upper() kaiju.swapcase() 'godzilla, king of the monsters'.title() ``` ##### center()/ljust()/rjust() ``` kaiju.center(20, '*') kaiju.ljust(20, '*') kaiju.rjust(20, '*') ``` ##### expandtabs() ``` tabbed_kaiju = '\tGodzilla' print('[' + tabbed_kaiju + ']') print('[' + tabbed_kaiju.expandtabs(16) + ']') ``` ##### join() ``` ' vs '.join(['Godzilla', 'Hedorah']) ','.join(['Godzilla', 'Mothra', 'King Ghidorah']) ``` ##### strip()/lstrip()/rstrip() ``` ' Godzilla '.strip() 'xxxGodzillayyy'.strip('xy') ' Godzilla '.lstrip() ' Godzilla '.rstrip() ``` ##### partition()/rpartition() ``` battle = 'Godzilla x Gigan' battle.partition(' x ') battle = 'Godzilla and Jet Jaguar vs. Gigan and Megalon' battle.partition(' vs. ') battle = 'Godzilla vs Megalon vs Jet Jaguar' battle.partition('vs') battle = 'Godzilla vs Megalon vs Jet Jaguar' battle.rpartition('vs') ``` ##### replace() ``` battle = 'Godzilla vs Mothra' battle.replace('Mothra', 'Anguiras') battle = 'Godzilla vs a monster and another monster' battle.replace('monster', 'kaiju', 2) battle = 'Godzilla vs a monster and another monster and yet another monster' battle.replace('monster', 'kaiju', 2) ``` ##### split()/rsplit() ``` battle = 'Godzilla vs King Ghidorah vs Mothra' battle.split(' vs ') kaijus = 'Godzilla,Mothra,King Ghidorah' kaijus.split(',') kaijus = 'Godzilla Mothra King Ghidorah' kaijus.split() kaijus = 'Godzilla,Mothra,King Ghidorah,Megalon' kaijus.rsplit(',', 2) ``` ##### splitlines() ``` kaijus_in_lines = 'Godzilla\nMothra\nKing Ghidorah\nEbirah' print(kaijus_in_lines) kaijus_in_lines.splitlines() kaijus_in_lines.splitlines(True) ``` ##### zfill() ``` age_of_Godzilla = 60 age_string = str(age_of_Godzilla) print(age_string, age_string.zfill(5)) ``` #### String information ##### isXXX() ``` print('Godzilla'.isalnum()) print('*Godzilla*'.isalnum()) print('Godzilla123'.isalnum()) print('Godzilla'.isalpha()) print('Godzilla123'.isalpha()) print('Godzilla'.isdigit()) print('60'.isdigit()) print('SpaceGodzilla'.isspace()) print(' '.isspace()) print('Godzilla'.islower()) print('godzilla'.islower()) print('Godzilla'.isupper()) print('GODZILLA'.isupper()) print('Godzilla vs Mothra'.istitle()) print('Godzilla X Mothra'.istitle()) ``` ##### count() ``` monsters = 'Godzilla and Space Godzilla and MechaGodzilla' print 'There are ', monsters.count('Godzilla'), ' Godzillas.' print 'There are ', monsters.count('Godzilla', len('Godzilla')), ' pseudo-Godzillas.' ``` ##### startswith()/endswith() ``` king_kaiju = 'Godzilla' print king_kaiju.startswith('God') print king_kaiju.endswith('lla') print king_kaiju.startswith('G') print king_kaiju.endswith('amera') ``` ##### find()/index()/rfind()/rindex() ``` kaiju_string = 'Godzilla,Gamera,Gorgo,Space Godzilla' print 'The first Godz is at position', kaiju_string.find('Godz') print 'The second Godz is at position', kaiju_string.find('Godz', len('Godz')) kaiju_string.index('Minilla') kaiju_string.rindex('Godzilla') ``` #### Advanced features ##### decode()/encode()/translate() Used to convert strings to/from Unicode and other systems. Rarely used in science code. ##### String formatting Similar to formatting in C, FORTRAN, etc.. There is a _lot_ more to this than I am showing here. ``` kaiju = 'Godzilla' age = 60 print '%s is %d years old.' % (kaiju, age) ``` ## The _string_ module The _string_ module is the Python equivalent of "junk DNA" in living organisms. 
It's been around since the beginning, but many of its functions have been superseded by evolution. But some ancient code still relies on it, so they leave the old parts in.... For modern code, the _string_ module does have some useful constants and functions. ``` import string print string.ascii_letters print string.ascii_lowercase print string.ascii_uppercase print string.digits print string.hexdigits print string.octdigits print string.letters print string.lowercase print string.uppercase print string.printable print string.punctuation print string.whitespace ``` The _string_ module also provides the _Formatter_ class, which can be useful for sophisticated text formatting. ## Regular Expressions ### What is a regular expression? Regular expressions ('regexps') are essentially a mini-language for describing string operations. Everything shown above with string methods and operators can be done with regular expressions. Most of the time, the regular expression verrsion is more concise. But not always more readable.... To use regular expressions, you have to import the 're' module. ``` import re ``` ### A very short, whirlwind tour of regular expressions #### Scanning ``` kaiju_truth = 'Godzilla is the King of the Monsters. Ebirah is also a monster, but looks like a giant lobster.' re.findall('Godz', kaiju_truth) print re.findall('(^.+) is the King', kaiju_truth) ``` For simple searches like this, using in() is typically easier. Regexps are by default case-sensitive. ``` print re.findall('\. (.+) is also', kaiju_truth) print re.findall('(.+) is also a (.+)', kaiju_truth)[0] print re.findall('\. (.+) is also a (.+),', kaiju_truth)[0] ``` #### Changing ``` some_kaiju = 'Godzilla, Space Godzilla, Mechagodzilla' print re.sub('Godzilla', 'Gamera', some_kaiju) print re.sub('(?i)Godzilla', 'Gamera', some_kaiju) ``` #### And so much more... You could spend a whole day (or more) just learning about regular expressions. But they are incredibly useful and powerful, especially in the all-to-frequent drudgery of munging files from one format to another. Regular expressions can be internally compiled for speed.
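Since the closing note above mentions that regular expressions can be compiled for speed, here is a minimal sketch of `re.compile()`; the kaiju strings are just made-up examples in the spirit of the ones above:

```
import re

# Pre-compile a case-insensitive pattern once, then reuse it.
kaiju_pattern = re.compile(r'godzilla', re.IGNORECASE)

monsters = 'Godzilla, Space Godzilla, MechaGodzilla, Gamera'
print(kaiju_pattern.findall(monsters))
print(kaiju_pattern.sub('Kaiju', monsters))
```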
# Intel® Distribution for GDB* In this notebook, we will cover using the Intel® Distribution for GDB* to debug oneAPI applications on the GPU. ##### Sections - [Intel Distribution for GDB Overview](#Intel-Distribution-for-GDB-Overview) - [How does the Intel Distribution for GDB debug GPUs?](#How-does-Intel-Distribution-for-GDB-debug-GPUs?) - [GDB Commands](#GDB-Commands) - [Debug an Application](#Debug-an-Application) - [Multi-Device Debugging](#Multi-Device-Debugging) Note: Unlike other modules in the oneAPI Essentials series, this notebook is designed for the DevCloud and cannot be run in a local environment. This is because when GDB pauses the GPU execution, display rendering is also interrupted. ## Learning Objectives The goal of this notebook is to show how the Intel® Distribution for GDB* can help you debug GPU kernels. At the end of module, you will be able to: <ul> <li>Run the Intel Distribution for GDB.</li> <li>Understand inferiors, threads, and SIMD lanes as shown in GDB.</li> <li>Use different methods to examine local variables for different threads and lanes.</li> </ul> ## Intel Distribution for GDB Overview Intel® Distribution for GDB* (*gdb-oneapi* executable) is part of the Intel® oneAPI Base Toolkit. It can be used to debug oneAPI applications written in several different languages targeting various different accelerator devices. <img src="assets/gdb_overview.jpg"> ### Major Features * Multi-target: The debugger can orchestrate multiple targets for different architectures. This feature allows you to debug the "host" portion and the "kernel" of a DPC++ program in the same GDB* session. * Auto-attach: The debugger automatically creates an inferior that attaches itself to the Intel® Graphics Technology target to be able to receive events and control the GPU for debugging. * Thread and SIMD lanes: The debugger displays SIMD lane information for the GPU threads on the command line interface. You can switch among active threads and lanes. * Support for debugging a kernel offloaded to a CPU, GPU, or FPGA-emulation device. ## How does the Intel Distribution for GDB debug GPUs? ### Compilation and Execution for Debug When debugging oneAPI applications with gdb-oneapi, debug information for the GPU needs to be generated and embedded in the application. The compilation and execution process looks like the following. <img src="assets/gpu_debug.jpg"> 1. Source code is compiled. Host code is compiled normally while kernel code is compiled with debug info into SPIR-V intermediate representation format embedded in the host binary. * Use -g (generate debug info) and -O0 (disable optimization) compiler options to debug source. * May use -O2 to debug optimized code at assembly level. * Use same optimization level when linking, if compiling and linking separately. * Ahead-of-time (AOT) compilation also works with GPU debug and can be utilize to avoid JIT compilation everytime application is run. 2. Launch appliction with `gdb-oneapi` * `gdb-oneapi <your_app>` 3. Application runtime compiles SPIR-V and debug info into ELF and DWARF formats. 4. GPU kernel code is executed and debugged. ### Inferiors for GPUs GDB creates objects called *inferior*s to represent the state of each program execution. An inferior usually corresponds to a debugee process. For oneAPI applications, GDB will create one inferior for the native host target and additional inferiors for each GPU or GPU tile. 
When a GPU application is debugged, the debugger, by default, automatically launches a `gdbserver-gt` process to listen to GPU debug events. The `gdbserver-gt` target is then added to the debugger as an inferior. <img src="assets/gdb_gpu_inferior.jpg"> To see information about the inferiors while debugging. Use the `info inferiors` GDB command. ### Debugging Threaded GPU SIMD Code GPU kernel code is written for a single work-item. When executing, the code is implicitly threaded and widened to vectors of work-items. In the Intel Distribution for GDB, variable locations are expressed as functions of the SIMD lane. The lane field is added to the thread representation in the form of `<inferior>.<thread>:<lane>`. Users can use the `info threads` command to see information about the various active threads. The `thread` command can be used to switching among active threads and SIMD lanes. The `thread apply <thread>:<lane> <cmd>` command can be used to apply the specified command to the specified lanes. SIMD Lanes Support: * Only enabled SIMD lanes are displayed * SIMD width is not fixed * User can switch between enabled SIMD lanes * After a stop, GDB switches to an enabled SIMD lane ## GDB Commands The following table lists some common GDB commands. If a command has special functionality for GPU debugging, description will be shown in orange. You may also consult the [Intel Distribution for GDB Reference Sheet](https://software.intel.com/content/www/us/en/develop/download/gdb-reference-sheet.html). | Command | Description | | ---: | :--- | | help \<cmd> | Print help information. | | run [arg1, ... argN] | Start the program, optionally with arguments. | | break \<file>:\<line> | Define a breakpoint at a specified line. | | info break | Show defined breakpoints. | | delete \<N> | Remove Nth breakpoint. | | step / next | Single-step a source line, stepping into / over function calls. | | info args/locals | Show the arguments/local variables of the current function. | | print \<exp> | Print value of expression. | | x/\<format> \<addr> | Examine the memory at \<addr>. | | up, down | Go one level up/down the function call stack | | disassemble | Disassemble the current function. <font color='orange'> If inside a GPU kernel, GPU instructions will be shown. </font> | | backtrace | Shown the function call stack. | | info inferiors | Display information about the inferiors. <font color='orange'> GPU debugging will display additional inferior(s) (gdbserver-gt). </font> | | info threads \<thread> | Display information about threads, including their <font color='orange'> active SIMD lanes. </font> | | thread \<thread>:\<lane> | Switch context to the <font color='orange'> SIMD lane of the specified thread. <font> | | thread apply \<thread>:\<lane> \<cmd> | Apply \<cmd> to specified lane of the thread. | | set scheduler-locking on/step/off | Lock the thread scheduler. Keep other threads stopped while current thread is stepping (step) or resumed (on) to avoid interference. Default (off). | | set nonstop on/off | Enable/disable nonstop mode. Set before program starts. <br> (off) : When a thread stops, all other threads stop. Default. <br> (on) : When a thread stops, other threads keep running. | | print/t $emask | Inspect the execution mask to show active SIMD lanes. | ## Debug an Application The kernel we're going to debug is a simple array transform function where the kernel adds 100 to even elements of the array and sets the odd elements to be -1. 
Below is the kernel code, the entire source code is [here](src/array-transform.cpp). ``` cpp 54 h.parallel_for(data_range, [=](id<1> index) { 55 size_t id0 = GetDim(index, 0); 56 int element = in[index]; // breakpoint-here 57 int result = element + 50; 58 if (id0 % 2 == 0) { 59 result = result + 50; // then-branch 60 } else { 61 result = -1; // else-branch 62 } 63 out[index] = result; 64 }); ``` ### Compile the Code Execute the following cell to compile the code. Notice the compiler options used to disable optimization and enable debug information. ``` ! dpcpp -O0 -g src/array-transform.cpp -o bin/array-transform ``` ### Create a debug script To debug on the GPU, we're going to write the GDB debug commands to a file and then submit the execution of the debugger to a node with GPUs. In our first script, we'll get take a look at how inferiors, threads, and SIMD lanes are represented. Our debug script will perform the following tasks. 1. Set a temporary breakpoint in the DPCPP kernel at line 59. 2. Run the application in the debugger. 3. Display information about the active inferiors once the breakpoint is encountered. 4. Display information about the active threads and SIMD lanes. 5. Display the execution mask showing which SIMD lanes are active. 6. Remove breakpoint. 7. Continue running. Execute the following cell to write the debug commands to file. ``` %%writefile lab/array-transform.gdb #Set Breakpoint in the Kernel echo ================= (1) tbreak 59 ===============\n tbreak 59 # Run the application on the GPU echo ================= (2) run gpu ===============\n run gpu echo ================= (3) info inferiors ============\n info inferiors echo ================= (4) info threads ============\n info threads # Show execution mask that show active SIMD lanes. echo ================= (5) print/t $emask ============\n print/t $emask echo ================= (6) c ==========================\n c ``` ### Start the Debugger The [run_debug.sh](run_debug.sh) script runs the *gdb-oneapi* executable with our debug script on the compiled application. Execute the following cell to submit the debug job to a node with a GPU. ``` ! chmod 755 q; chmod 755 run_debug.sh; if [ -x "$(command -v qsub)" ]; then ./q run_debug.sh; else ./run_debug.sh; fi ``` #### Explanation of Output 1. You should see breakpoint 1 created at line 59. 2. Application is run with the *gpu* argument to execute on the GPU device. Program should stop at the kernel breakpoint. 3. With context now automatically switched to the device. The *info inferiors* command will show the active GDB inferior(s). Here, you should see two, one corresponds to the host portion, another, the active one, for gdbserver-gt which is debugging the GPU kernel. 4. The *info threads* command allows you to examine the active threads and SIMD lanes. There should be 8 threads active. Notice that only even SIMD lanes are active, this is because only the even work-items encounter the breakpoint at line 59. 5. Printing the $emask execution mask also shows the even lanes being active. 6. Continue running the program. ## Debug the Application Again Now, we will debug the application again. This time, we'll switch threads, use the scheduler-locking feature, and print local variables. Run the following cell to write new GDB commands to array-transform.gdb. 
``` %%writefile lab/array-transform.gdb #Set Breakpoint in the Kernel echo ================= (1) break 59 ===============\n break 59 echo ================= (2) break 61 ===============\n break 61 # Run the application on the GPU echo ================= (3) run gpu ===============\n run gpu # Keep other threads stopped while current thread is stepped echo ================= (4) set scheduler-locking step ===============\n set scheduler-locking step echo ================= (5) next ===============\n next echo ================= (6) info threads 2.* ===============\n info threads 2.* echo ================= (7) Print element ============\n print element # Switch thread echo ================= (8) thread 2.1:5 =======================\n thread 2.1:4 echo ================= (9) Print element ============\n print element echo ================= (10) thread apply 2.1:* print element =======================\n thread apply 2.1:* print element # Inspect vector of a local variable, 8 elements, integer word echo ================= (11) x/8dw &result =======================\n x /8dw &result echo ================= (12) d 1 =======================\n d 1 echo ================= (13) d 2 =======================\n d 2 echo ================= (14) c ==========================\n c ``` ### Start Debugger Again To Examine Variables, Memories Run the following cell to run the debugger for the second time. ``` ! chmod 755 q; chmod 755 run_debug.sh; if [ -x "$(command -v qsub)" ]; then ./q run_debug.sh; else ./run_debug.sh; fi ``` ### Explanation of Output 1. Set Breakpoint at line 59 for the even lanes. 2. Set Breakpoint at line 61 for the odd lanes. 3. Start the application, it will stop at breakpoint 2. At this point all the threads should be stopped and active for the odd SIMD lanes. 4. Set schedule-locking so that when the current thread is stepped all other threads remain stopped. 5. Step the current thread (thread 2.1), breakpoint at line 59 is encountered only for current thread. 6. Show the threads and where each thread is stopped. Notice the current thread is stopped and active at the even SIMD lanes. 7. Print local variable element. 8. Switch to a different lane. 9. Print local variable element again, this time you should see a different value. 10. Use thread apply to print element for all lanes of the 2.1 thread. 11. Print vectorized result. 12. Delete breakpoint. 13. Delete breakpoint. 14. Run until the end. ## Multi-Device Debugging The Intel Distribution for GDB can debug applications that offload a kernel to multiple GPU devices. Each GPU device appear as a separate inferior within the debugger. Users can switch to the context of a thread that corresponds to a particular GPU or CPU using the `inferior <id>` command. Threads of the GPUs can be independently resumed and the thread state can be individually examined. ## References * [Intel Distribution for GDB Landing Page](https://software.intel.com/content/www/us/en/develop/tools/oneapi/components/distribution-for-gdb.html) * [Intel Distribution for GDB Release Notes](https://software.intel.com/content/www/us/en/develop/articles/gdb-release-notes.html) * [Intel Distribution for GDB Reference Sheet](https://software.intel.com/content/www/us/en/develop/download/gdb-reference-sheet.html) ## Summary * Used Intel Distribution for GDB to debug a GPU application. * Used various-GPU related GDB commands.
# RefAssig V0.0 this is my faster simple version of the PMI and Abstract gene count scoring system. Improvements: * rapid access * streamlined function calls * clean data output * xml abstract data parsing --- ## 1.0 Libraries and input The work flow looks like this: 1. read in a list of chemicals as a csv with a column named chemicals and all chemicals examined in this study 2. read in a query map as a csv * a query map is a relationship map between all query phrases and all classifiers * three columns are used: * A general class/super class for a query * a direction: 'pos' or 'neg' refereing to positive or negative * the search phrase surrounded by quotes, e.g., ' "dna damage" ' The work flow will automatically construction all wanted querries between chemicals and search phrases from these list. In later steps the results can be reduced to a meaningfull level by knowing if search phrases are addative for the target or subtractive e.g., metal stresss vs chelator. Here, a high number of chelator hits and metal stress hits might indicate that although the chemical might have a significant response for metal stress it also is likely related tagentially to the overall function. Think about what queries you are supplying to ensure they are meaningful. --- # RAPID QUERY FUNCTION ``` #This function reads csv with a list of chemical names and a query map and returns a list of dataframes for all combinations of queries in the data set #this result can be input to two functions: ```PMI_matrix``` which will give you information about the information or strength of the search phrase relative to the total query_result = function(chemical.list.path, query.map.path = NULL, limit_input_to = NULL, output.path) { #Libraries suppressMessages(library(tidyverse)) suppressMessages(library(easyPubMed)) suppressMessages(library(kableExtra)) suppressMessages(library(parallel)) #testingpaths chemical.list.path = "input/chemical-list-LINCS-2021.07.23.csv" query.map.path = "input/query.map-build.csv" #query.map.path = NULL limit_input_to = NULL if(!is.null(query.map.path)) { ###################### #SEARCH WITH QUERY MAP #read chemical list chems = suppressMessages(read_csv(paste0(chemical.list.path))) #read query map q.map = suppressMessages(read_csv(paste0(query.map.path))) %>% mutate(phrase.q = paste0('"',phrase,'"')) #cutchems if(is.null(limit_input_to)) {chems = chems$chemicals[1:length(chems$chemicals)]} else {chems = chems$chemicals[1:limit_input_to]} #build partial search vectors chems.search = paste0('"', chems,'" AND ') searchterms.search = q.map$phrase.q #controlable and bindable search grid with directions search.combinations = expand.grid(chems.search, searchterms.search, stringsAsFactors = FALSE) %>% setNames(c('chemical', 'phrase.q')) %>% left_join(q.map, by = "phrase.q") %>% mutate(query = paste(chemical, phrase.q)) %>% select(SRP, direction, query) #acutal query list search.query = search.combinations$query %>% as.list() } else { ####################### #SEARCH WITH ONLY CHEMS #read chemical list chems = suppressMessages(read_csv(paste0(chemical.list.path))) #cutchems if(is.null(limit_input_to)) {chems = chems$chemicals[1:length(chems$chemicals)]} else {chems = chems$chemicals[1:limit_input_to]} #build partial search vectors chems.search = paste0('"', chems,'"') #actual query list search.query = chems.search } #build output object query.result = vector(length = 3, mode = "list") #query query.result[[1]] = mclapply(seq_along(search.query), function(x){ Sys.sleep(time = 2) 
get_pubmed_ids(search.query[[x]], api_key = 'b3accc43abfb376d63b0c2d2f7d8f984de09') %>% {if(class(.) == 'try-error') 'TRY-FAILED-increase sys sleep' else {if(as.numeric(.$Count) > 0) fetch_pubmed_data(., retstart = 0, retmax = 100000) %>% articles_to_list() %>% lapply(., article_to_df, max_chars = 100000, getAuthors = FALSE) %>% do.call(rbind, .) else 'EMPTY' }} }, mc.cores = 10) %>% setNames(search.query) #clean out empty hits query.result[[1]] = query.result[[1]][query.result[[1]] != 'EMPTY'] #save additional mapping information if a map was used if(is.null(query.map.path)) {query.result[[2]] = 'NO MAP USED'} else {query.result[[2]] = search.combinations} if(is.null(query.map.path)) {query.result[[3]] = 'NO MAP USED'} else {query.result[[3]] = q.map} #save result base::save(query.result, file = paste0("../AbstractR-projects/LINCS/output/query.result_LINCS_", Sys.Date(),".RData")) return(query.result) } ``` --- # ABSTRACT GENE COUNT PARSING FINAL FUNCTION [ ] update prioritization of chemical over abstract for gene parsing line 54 - change outter apply to MC and inner to lapply '''#isolate genes from all abstracts abstracts.genes.filtered = lapply(seq_along(abstracts), function(y) mclapply(1:nrow(abstracts[[y]]), function(x) abstracts[[y]][x,2] %>% remove.punc() %>% match.genes(mouse.rat.vec), mc.cores = 32) %>% unlist()) %>% setNames(names(abstracts))`'' ``` #This function reads csv with a list of chemical names and a query map and returns a list of dataframes for all combinations of queries in the data set #this result can be input to two functions: ```PMI_matrix``` which will give you information about the information or strength of the search phrase relative to the total abstract_gene_counts = function(query_result_object) { #Libraries suppressMessages(library(tidyverse)) library(parallel) #helper functions remove.punc = function(x) { string.1 = gsub("(?!\\.)[[:punct:]]", "", x, perl=TRUE) string.2 = substr(string.1,1,nchar(string.1)-1) #%>% tolower() string.3 = str_split(string.2, " ") %>% unlist() return(string.3) } match.genes <- function(sentence, genelist) { gene.matches = sentence[sentence %in% genelist] return(gene.matches) } #testingpaths #query_result_object = "../AbstractR-projects/LINCS/output/query.result_LINCS_2021-07-29.RData" #read query result load(paste0(query_result_object)) #set up genes dictionary - improve this in the future, allow for a list of homolouges in a csv from bioMart - for now use your H-M-R set up #read in coding genes #also a data dir or lib is needed gene.list = suppressMessages(suppressWarnings(read_tsv("data/proteincodinggenesHGNC.txt"))) %>% pull(symbol) %>% unique() #read in non human gene lists homolouge lists nh.gene.list <- suppressMessages(read_csv("data/genes.list_h.m.r.csv")) %>% filter(human %in% gene.list) %>% distinct(`.keep_all` = FALSE) #comparative lists mouse.rat.vec = c(nh.gene.list$human, nh.gene.list$mouse, nh.gene.list$rat) %>% unique() #filter some bad gene names bad.genes.q = c('a') mouse.rat.vec = mouse.rat.vec[!mouse.rat.vec %in% bad.genes.q] #take only pmids and abstract from the query resul abstracts = mclapply(seq_along(query.result[[1]]), function(x) { query.result[[1]][[x]] %>% select(pmid, abstract) }, mc.cores = 32) %>% setNames(names(query.result[[1]])) #isolate genes from all abstracts abstracts.genes.filtered = mclapply(seq_along(abstracts), function(y) lapply(1:nrow(abstracts[[y]]), function(x) abstracts[[y]][x,2] %>% remove.punc() %>% match.genes(mouse.rat.vec)) %>% unlist() %>% na.omit, mc.cores = 32) %>% 
setNames(names(abstracts)) #remove empty gene matches abstracts.genes.filtered = abstracts.genes.filtered[names(lapply(abstracts.genes.filtered, length)[lapply(abstracts.genes.filtered, length) > 0])] #count the genes for all queried chemicals with genes mentioned in abstracts query.result.gene.counts = mclapply(seq_along(abstracts.genes.filtered), function(x) { abstracts.genes.filtered[[x]] %>% table() %>% data.frame() %>% setNames(c('gene', 'count')) %>% mutate(gene = as.character(gene)) %>% mutate(rat = nh.gene.list$human[match(gene, nh.gene.list$rat)], mouse = nh.gene.list$human[match(gene, nh.gene.list$mouse)]) %>% mutate(gene = ifelse(is.na(rat), gene, rat), gene = ifelse(is.na(mouse), gene, mouse)) %>% select(gene, count) %>% group_by(gene) %>% summarise(count = sum(count)) %>% arrange(desc(count))}, mc.cores = 32) %>% setNames(names(abstracts.genes.filtered)) #calulate the final counts matrixs for all fo the samples abstract.gene.count.matrix = query.result.gene.counts %>% reduce(full_join, by = "gene") %>% arrange(gene) %>% column_to_rownames('gene') %>% t() %>% as.data.frame() %>% replace(is.na(.), 0) %>% rownames_to_column('sample') %>% select(-sample) %>% mutate(chemical = names(query.result.gene.counts)) %>% column_to_rownames('chemical') return(abstract.gene.count.matrix) } abstract.gene.count.matrix %>% rownames_to_column('chemicals') %>% write_csv(., path = "output/LINCS-chemical.no.map-abstract-gene-counts-2021.09.17.csv" ) save(abstract.gene.count.matrix, file = "../AbstractR-projects/LINCS/output/gene.counts.matrix-2021.09.17-stress.only.RData") ``` --- # PMI calculation ``` #This function reads a ```query_object``` output by the ```query_result``` function. The result is a list of #parwise mututal information scores resulting from PMI_query_combinations = function(query_result_object, chemical.list.path, search.min = 0, search.max = 100000, num.cores = 1) { #Libraries library(tidyverse) library(parallel) library(magrittr) #testingpaths # query_result_object = "input/build/pmi.testing.query.object.RData" # chemical.list.path = "input/build/pmi.testing.chem.list.csv" # search.min = 5 # search.max = 100000 # num.cores = 15 #read query result load(paste0(query_result_object)) chems.data = suppressMessages(read_csv(paste0(chemical.list.path))) %>% mutate(q.chemical = paste0('"',chemicals,'"')) #take only pmids and abstract from the query result and parse by/ aggregate by SRP/query search_matrix.i = mclapply(seq_along(query.result[[1]]), function(x) { query.result[[1]][[x]] %>% select(pmid) %>% distinct() %>% setNames(c("pmid")) %>% mutate(query = names(query.result[[1]][x]))}, mc.cores = num.cores) %>% do.call(rbind, .) %>% left_join(query.result[[2]], 'query') %>% mutate(search = query) %>% separate(col = search, into = c('q.chemical', 'search'), sep = ' AND ') %>% left_join(chems.data, 'q.chemical') %>% mutate(search = gsub('\\"','', search)) %>% group_split(SRP) # aggregate by SRP and chemcial name to remove non-inique PMIDs and remove negative query phrases search_matrix = mclapply(seq_along(search_matrix.i), function(y){ search_matrix.i[[y]] %>% ungroup() %>% group_by(SRP, chemicals, pmid) %>% summarise(dir.tot = direction %>% unique %>% sort %>% paste(collapse = ", ")) %>% #gets rid of negative querried PMIDs filter(!grepl(pattern = "neg", x = dir.tot)) %>% ungroup() %>% group_by(SRP, chemicals) %>% summarise(count = pmid %>% unique() %>% length) %>% #takes the unqiue PMID vector length as n unique searches ungroup() }, mc.cores = num.cores) %>% do.call(rbind, .) 
%>% pivot_wider(names_from = SRP, values_from = count, values_fill = 0) %>% filter(!is.na(chemicals)) %>% column_to_rownames("chemicals") %>% mutate(rsum = rowSums(across(where(is.numeric)))) #filter based on search returns search_matrix.wide.f = search_matrix %>% filter(rsum >= search.min & rsum <= search.max) %>% select(-rsum) #build a result matrix search_PMI = matrix(nrow = nrow(search_matrix.wide.f), ncol = ncol(search_matrix.wide.f)) %>% magrittr::set_rownames(rownames(search_matrix.wide.f)) %>% magrittr::set_colnames(colnames(search_matrix.wide.f)) #Calculate PMI for (i in 1:nrow(search_matrix.wide.f)) { for (j in 1:ncol(search_matrix.wide.f)){ search_PMI[i, j] <- (log2(sum(search_matrix.wide.f[i,])/sum(search_matrix.wide.f)) + log2(sum(search_matrix.wide.f[,j])/sum(search_matrix.wide.f)) -log2(search_matrix.wide.f[i,j]/sum(search_matrix.wide.f))) *-1 } } #clean up infs search_PMI = search_PMI %>% replace(is.infinite(.), 0) return(search_PMI) } PMI_query_combinations() ``` --- # select.highest --- # search hits table ``` #This function reads a ```query_object``` output by the ```query_result``` function. The result is a list of #parwise mututal information scores resulting from search_query_combinations = function(query_result_object, chemical.list.path, search.min = 0, search.max = 100000) { #Libraries library(tidyverse) library(parallel) library(magrittr) #testingpaths # query_result_object = "../AbstractR-projects/LINCS/output/query.result_LINCS_2021-07-29.RData" # chemical.list.path = "input/chemical-list-LINCS-2021.07.23.csv" # search.min = 5 # search.max = 100000 #read query result load(paste0(query_result_object)) chems.data = suppressMessages(read_csv(paste0(chemical.list.path))) %>% mutate(q.chemical = paste0('"',chemicals,'"')) #take only pmids and abstract from the query result and calc number results search_matrix = mclapply(seq_along(query.result[[1]]), function(x) { query.result[[1]][[x]] %>% pull(pmid) %>% unique() %>% length() }, mc.cores = 10) %>% setNames(names(query.result[[1]])) %>% unlist() %>% as.data.frame() %>% setNames('search_results') %>% rownames_to_column('query') %>% left_join(query.result[[2]], 'query') %>% mutate(search = query) %>% separate(col = search, into = c('q.chemical', 'search'), sep = ' AND ') %>% left_join(chems.data, 'q.chemical') %>% mutate(search = gsub('\\"','', search)) %>% select(chemicals, SRP, search, direction, search_results) #make a wide form matrix search_matrix.wide = search_matrix %>% select(chemicals, search, search_results) %>% pivot_wider(id_cols = chemicals, names_from = search, values_from = search_results) %>% replace(is.na(.), 0) %>% mutate(rsum = rowSums(across(where(is.numeric)))) #filter based on search returns search_matrix.wide.f = search_matrix.wide %>% filter(rsum >= search.min & rsum <= search.max) %>% select(-rsum) %>% column_to_rownames('chemicals') return(search_matrix.wide.f) } #This function takes a numeric matrix resulting from PMI, GSEA (connectivity), or some other metric and #' returns that same matrix with two additional columns: the name of the highest scoring column for each row and #' the corresponding score select.highest = function(score.matrix) { #Libraries suppressMessages(library(tidyverse)) #testingpaths #score.matrix = score.matrix #maxcolumn and value score.matrix.highest = data.frame(max_column = colnames(score.matrix)[apply(score.matrix, 1, which.max)], stringsAsFactors = FALSE) %>% cbind(score.matrix %>% apply(1, max) %>% as.data.frame() %>% setNames("max_value") %>% 
rownames_to_column(var = "row"), .) %>% left_join(score.matrix %>% as.data.frame() %>% rownames_to_column('row'), by = 'row') %>% column_to_rownames('row') return(score.matrix.highest) } ``` --- # WORK FLOW --- ``` suppressMessages(R.utils::sourceDirectory("lib/", modifiedOnly = FALSE, recursive = TRUE)) query.result = query_result(chemical.list.path = "input/chemical-list-LINCS-2021.07.23.csv", query.map.path = "input/query.map.csv", limit_input_to = 10) load("input/build/pmi.testing.query.object.RData") query.result[[3]] #save(query.result, file = "input/pmi.testing.query.object.RData") count.matrix = abstract_gene_counts(query_result_object = "input/build/pmi.testing.query.object.RData") count.matrix PMI_matrix = PMI_query_combinations(query_result_object = "input/build/pmi.testing.query.object.RData", chemical.list.path = "input/chemical-list-LINCS-2021.07.23.csv", search.min = 0, search.max = 100000) PMI_matrix PMI_matrix_best = select.highest(score.matrix = PMI_matrix) PMI_matrix_best ``` accuracy rank ``` #this function takes a matrix of measures for a sample set, alongside a column of validated assignments, and measures the weighted accuracy of each assignment (a starting sketch follows under SCRATCH and LEARNING below) function(score.matrix, val.col, measures.cols){ matrix = score.matrix } ``` --- SCRATCH and LEARNING
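As scratch work toward the `accuracy rank` stub above, the following is a minimal sketch. It assumes `score.matrix` is a data frame of per-sample scores, `val.col` names the column of validated assignments, and `measures.cols` names the columns holding predicted assignments; the optional `weight.col` argument and the weighting scheme are assumptions introduced here for illustration, not the project's final definition.

```
#SCRATCH: sketch of the weighted accuracy helper stubbed under "accuracy rank" above
accuracy.rank = function(score.matrix, val.col, measures.cols, weight.col = NULL) {
  suppressMessages(library(tidyverse))
  sapply(measures.cols, function(m) {
    #TRUE where this measure's assignment matches the validated assignment
    hits = score.matrix[[m]] == score.matrix[[val.col]]
    #per-sample weights; default to equal weights when no weight column is given
    w = if (is.null(weight.col)) rep(1, nrow(score.matrix)) else score.matrix[[weight.col]]
    #weighted accuracy for this measure
    sum(w * hits, na.rm = TRUE) / sum(w, na.rm = TRUE)
  }) %>% sort(decreasing = TRUE) #rank measures from most to least accurate
}
```

With the output of ```select.highest```, for instance, `measures.cols = 'max_column'` and `weight.col = 'max_value'` would score how often the top-scoring column matches the validated column, weighted by its score.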
github_jupyter
# Cyclical Systems: An Example of the Crank-Nicolson Method ## CH EN 2450 - Numerical Methods **Prof. Tony Saad (<a>www.tsaad.net</a>) <br/>Department of Chemical Engineering <br/>University of Utah** <hr/> ``` import numpy as np from numpy import * # %matplotlib notebook # %matplotlib nbagg %matplotlib inline %config InlineBackend.figure_format = 'svg' # %matplotlib qt import matplotlib.pyplot as plt from scipy.optimize import fsolve from scipy.integrate import odeint def forward_euler(rhs, f0, tend, dt): ''' Computes the forward_euler method ''' nsteps = int(tend/dt) f = np.zeros(nsteps) f[0] = f0 time = np.linspace(0,tend,nsteps) for n in np.arange(nsteps-1): f[n+1] = f[n] + dt * rhs(f[n], time[n]) return time, f def forward_euler_system(rhsvec, f0vec, tend, dt): ''' Solves a system of ODEs using the Forward Euler method ''' nsteps = int(tend/dt) neqs = len(f0vec) f = np.zeros( (neqs, nsteps) ) f[:,0] = f0vec time = np.linspace(0,tend,nsteps) for n in np.arange(nsteps-1): t = time[n] f[:,n+1] = f[:,n] + dt * rhsvec(f[:,n], t) return time, f def be_residual(fnp1, rhs, fn, dt, tnp1): ''' Nonlinear residual function for the backward Euler implicit time integrator ''' return fnp1 - fn - dt * rhs(fnp1, tnp1) def backward_euler(rhs, f0, tend, dt): ''' Computes the backward euler method :param rhs: an rhs function ''' nsteps = int(tend/dt) f = np.zeros(nsteps) f[0] = f0 time = np.linspace(0,tend,nsteps) for n in np.arange(nsteps-1): fn = f[n] tnp1 = time[n+1] fnew = fsolve(be_residual, fn, (rhs, fn, dt, tnp1)) f[n+1] = fnew return time, f def cn_residual(fnp1, rhs, fn, dt, tnp1, tn): ''' Nonlinear residual function for the Crank-Nicolson implicit time integrator ''' return fnp1 - fn - 0.5 * dt * ( rhs(fnp1, tnp1) + rhs(fn, tn) ) def crank_nicolson(rhs,f0,tend,dt): nsteps = int(tend/dt) f = np.zeros(nsteps) f[0] = f0 time = np.linspace(0,tend,nsteps) for n in np.arange(nsteps-1): fn = f[n] tnp1 = time[n+1] tn = time[n] fnew = fsolve(cn_residual, fn, (rhs, fn, dt, tnp1, tn)) f[n+1] = fnew return time, f ``` # Sharp Transient Solve the ODE: \begin{equation} \frac{\text{d}y}{\text{d}t} = -1000 y + 3000 - 2000 e^{-t};\quad y(0) = 0 \end{equation} The analytical solution is \begin{equation} y(t) = 3 - 0.998 e^{-1000t} - 2.002 e^{-t} \end{equation} We first plot the analytical solution ``` y = lambda t : 3 - 0.998*exp(-1000*t) - 2.002*exp(-t) t = np.linspace(0,1,500) plt.plot(t,y(t)) plt.grid() ``` Now let's solve this numerically. 
We first define the RHS for this function ``` def rhs_sharp_transient(f,t): return 3000 - 1000 * f - 2000* np.exp(-t) ``` Let's solve this using Forward Euler, Backward Euler, and Crank-Nicolson ``` y0 = 0 tend = 0.03 dt = 0.001 t,yfe = forward_euler(rhs_sharp_transient,y0,tend,dt) t,ybe = backward_euler(rhs_sharp_transient,y0,tend,dt) t,ycn = crank_nicolson(rhs_sharp_transient,y0,tend,dt) plt.plot(t,y(t),label='Exact') # plt.plot(t,yfe,'r.-',markevery=1,markersize=10,label='Forward Euler') plt.plot(t,ybe,'k*-',markevery=2,markersize=10,label='Backward Euler') plt.plot(t,ycn,'o-',markevery=2,markersize=2,label='Crank-Nicolson') plt.grid() plt.legend() ``` # Oscillatory Systems Solve the ODE: \begin{equation} \frac{\text{d}y}{\text{d}t} = r \omega \sin(\omega t) \end{equation} The analytical solution is \begin{equation} y(t) = r - r \cos(\omega t) \end{equation} First plot the analytical solution ``` r = 0.5 ω = 0.02 y = lambda t : r - r * cos(ω*t) t = np.linspace(0,100*pi) plt.clf() plt.plot(t,y(t)) plt.grid() ``` Let's solve this numerically ``` def rhs_oscillatory(f,t): r = 0.5 ω = 0.02 return r * ω * sin(ω*t) y0 = 0 tend = 100*pi dt = 10 t,yfe = forward_euler(rhs_oscillatory,y0,tend,dt) t,ybe = backward_euler(rhs_oscillatory,y0,tend,dt) t,ycn = crank_nicolson(rhs_oscillatory,y0,tend,dt) plt.plot(t,y(t),label='Exact') plt.plot(t,yfe,'r.-',markevery=1,markersize=10,label='Forward Euler') plt.plot(t,ybe,'k*-',markevery=2,markersize=10,label='Backward Euler') plt.plot(t,ycn,'o-',markevery=2,markersize=2,label='Crank-Nicolson') plt.grid() plt.legend() plt.savefig('cyclical-system-example.pdf') import urllib import requests from IPython.core.display import HTML def css_styling(): styles = requests.get("https://raw.githubusercontent.com/saadtony/NumericalMethods/master/styles/custom.css") return HTML(styles.text) css_styling() ```
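As a quantitative supplement to the plots above, the short check below compares the three integrators against the exact solution of the oscillatory problem. This is a minimal sketch assuming `forward_euler`, `backward_euler`, `crank_nicolson`, `rhs_oscillatory`, and the exact solution `y` defined above are still in scope; note that the integrators build their time grids with `np.linspace(0, tend, nsteps)`, so the effective spacing differs slightly from `dt`, and the reported errors reflect that grid rather than an idealized step size.

```
# Maximum error against the exact oscillatory solution for each method and step size.
# Assumes forward_euler, backward_euler, crank_nicolson, rhs_oscillatory, and y
# (the exact solution r - r*cos(w*t)) from the cells above are in scope.
for dt in (20.0, 10.0, 5.0):
    for name, method in (('Forward Euler', forward_euler),
                         ('Backward Euler', backward_euler),
                         ('Crank-Nicolson', crank_nicolson)):
        t, ynum = method(rhs_oscillatory, 0, 100*pi, dt)
        err = np.max(np.abs(ynum - y(t)))
        print('dt={:5.1f}  {:15s}  max error = {:.2e}'.format(dt, name, err))
```

If the step-size bookkeeping in the integrators is tightened (for example, by using `nsteps + 1` grid points or recomputing `dt` from the grid), the same loop can be reused for a proper order-of-accuracy study.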
github_jupyter
# Unsupervised neural computation - Practical Dependencies: - Python (>= 2.6 or >= 3.3) - NumPy (>= 1.6.1) - SciPy (>= 0.12) - SciKit Learn (>=0.18.1) Just as there are different ways in which we ourselves learn from our own surrounding environments, so it is with neural networks. In a broad sense, we may categorize the learning processes through which neural networks function as follows: learning with a teacher and learning without a teacher. These different forms of learning as performed on neural networks parallel those of human learning. Learning with a teacher is also referred to as supervised learning. In conceptual terms, we may think of the teacher as having knowledge of the environment, with that knowledge being represented by a set of input - output examples. Unsupervised learning does not require target vectors for the outputs. Without input-output training pairs as external teachers, unsupervised learning is self-organized to produce consistent output vectors by modifying weights. That is to say, there are no labelled examples of the function to be learned by the network. For a specific task-independent measure, once the network has become tuned to the statistical regularities of the input data, the network develops the ability to discover internal structure for encoding features of the input or compress the input data, and thereby to create new classes automatically. ## Radial Basis Functions and Radial Basis Function Networks - Semi-supervised Learning ### combining supervised and unsupervised learning In machine learning, the radial basis function kernel, or RBF kernel, is a popular kernel function (typically Gaussian) used in various kernelized learning algorithms. ``` # Class implementing the basic RBF parametrization # based on code from https://github.com/jeffheaton/aifh import numpy as np class RbfFunction(object): def __init__(self, dimensions, params, index): self.dimensions = dimensions self.params = params self.index = index @property def width(self): return self.params[self.index] @width.setter def width(self, value): self.params[self.index] = value def set_center(self, index, value): self.params[self.index + index + 1] = value def get_center(self, index): return self.params[self.index + index + 1] ``` RBFs can take various shapes: quadratic, multi-quadratic, inverse multi-quadratic, mexican hat. Yet the most used is the Gaussian. ``` # Class implementing a Gaussian RBF class RbfGaussian(RbfFunction): def evaluate(self, x): value = 0 width = self.width for i in range(self.dimensions): center = self.get_center(i) value += ((x[i] - center) ** 2) / (2.0 * width * width) return np.exp(-value) ``` A RBF network is an advanced machine learning algorithm that uses a series of RBF functions to perform regression. It can also perform classification by means of one-of-n encoding. The long term memory of a RBF network is made up of the widths and centers of the RBF functions, as well as input and output weighting. ``` # Class implementing a Gaussian RBF Network class RbfNetwork(object): def __init__(self, input_count, rbf_count, output_count): """ Create an RBF network with the specified shape. @param input_count: The input count. @param rbf_count: The RBF function count. @param output_count: The output count. 
""" self.input_count = input_count self.output_count = output_count # calculate input and output weight counts # add 1 to output to account for an extra bias node input_weight_count = input_count * rbf_count output_weight_count = (rbf_count + 1) * output_count rbf_params = (input_count + 1) * rbf_count self.long_term_memory = np.zeros((input_weight_count + output_weight_count + rbf_params), dtype=float) self.index_input_weights = 0 self.index_output_weights = input_weight_count + rbf_params self.rbf = {} # default the Rbf's to gaussian for i in range(0, rbf_count): rbf_index = input_weight_count + ((input_count + 1) * i) self.rbf[i] = RbfGaussian(input_count, self.long_term_memory, rbf_index) def compute_regression(self, input): """ Compute the output for the network. @param input: The input pattern. @return: The output pattern. """ # first, compute the output values of each of the RBFs # Add in one additional RBF output for bias (always set to one). rbf_output = [0] * (len(self.rbf) + 1) # bias rbf_output[len(rbf_output) - 1] = 1.0 for rbfIndex in range(0, len(self.rbf)): # weight the input weighted_input = [0] * len(input) for inputIndex in range(0, len(input)): memory_index = self.index_input_weights + (rbfIndex * self.input_count) + inputIndex weighted_input[inputIndex] = input[inputIndex] * self.long_term_memory[memory_index] # calculate the rbf rbf_output[rbfIndex] = self.rbf[rbfIndex].evaluate(weighted_input) # Second, calculate the output, which is the result of the weighted result of the RBF's. result = [0] * self.output_count for outputIndex in range(0, len(result)): sum_value = 0 for rbfIndex in range(0, len(rbf_output)): # add 1 to rbf length for bias memory_index = self.index_output_weights + (outputIndex * (len(self.rbf) + 1)) + rbfIndex sum_value += rbf_output[rbfIndex] * self.long_term_memory[memory_index] result[outputIndex] = sum_value # finally, return the result. return result def reset(self): """ Reset the network to a random state. """ for i in range(0, len(self.long_term_memory)): self.long_term_memory[i] = np.random.uniform(0, 1) def compute_classification(self, input): """ Compute the output and return the index of the output with the largest value. This is the class that the network recognized. @param input: The input pattern. @return: """ output = self.compute_regression(input) return output.index(max(output)) def copy_memory(self, source): """ Copy the specified vector into the long term memory of the network. @param source: The source vector. """ for i in range(0, len(source)): self.long_term_memory[i] = source[i] ``` The Iris dataset is a traditional benchmark in classification problems in ML. The data set consists of 50 samples from each of three species of Iris (Iris setosa, Iris virginica and Iris versicolor). Four features were measured from each sample: the length and the width of the sepals and petals, in centimetres. Based on the combination of these four features, Fisher developed a linear discriminant model to distinguish the species from each other. The Iris flower data set or Fisher's Iris data set is a multivariate data set introduced by Ronald Fisher in his 1936 paper "The use of multiple measurements in taxonomic problems" as an example of linear discriminant analysis. It is sometimes called Anderson's Iris data set because Edgar Anderson collected the data to quantify the morphologic variation of Iris flowers of three related species. 
Based on Fisher's linear discriminant model, this data set became a typical test case for many statistical classification techniques in machine learning such as support vector machines. In the following we will use simulated annealing to fit an RBF network to the Iris data set, to classifiy the iris species correctly. Simulated annealing is a probabilistic technique for approximating the global optimum of a given function. Specifically, it is a metaheuristic to approximate global optimization in a large search space. ``` # Find the dataset import os import sys from normalize import Normalize from error import ErrorCalculation from train import TrainAnneal import numpy as np irisFile = os.path.abspath("./data/iris.csv") # Read the Iris data set print('Reading CSV file: ' + irisFile) norm = Normalize() iris_work = norm.load_csv(irisFile) # Extract the original iris species so we can display during the final validation ideal_species = [row[4] for row in iris_work] # Setup the first four fields to "range normalize" between -1 and 1. for i in range(0, 4): norm.make_col_numeric(iris_work, i) norm.norm_col_range(iris_work, i, 0, 1) # Discover all of the classes for column #4, the iris species. classes = norm.build_class_map(iris_work, 4) inv_classes = {v: k for k, v in classes.items()} # Normalize iris species using one-of-n. # We could have used equilateral as well. For an example of equilateral, see the example_nm_iris example. norm.norm_col_one_of_n(iris_work, 4, classes, 0, 1) # Prepare training data. Separate into input and ideal. training = np.array(iris_work) training_input = training[:, 0:4] training_ideal = training[:, 4:7] # Define the score of the training process of the network def score_funct(x): """ The score function for Iris anneal. @param x: @return: """ global best_score global input_data global output_data # Update the network's long term memory to the vector we need to score. network.copy_memory(x) # Loop over the training set and calculate the output for each. actual_output = [] for input_data in training_input: output_data = network.compute_regression(input_data) actual_output.append(output_data) # Calculate the error with MSE. result = ErrorCalculation.mse(np.array(actual_output), training_ideal) return result # Create an RBF network. There are four inputs and two outputs. # There are also five RBF functions used internally. # You can experiment with different numbers of internal RBF functions. # However, the input and output must match the data set. inputs = 4 rbfs = 4 outputs = 3 network = RbfNetwork(inputs, rbfs, outputs) network.reset() # Create a copy of the long-term memory. This becomes the initial state. x0 = list(network.long_term_memory) # Perform the annealing # Train a Machine Learning Algorithm using Simulated Annealing. Simulated Annealing is a Monte Carlo algorithm # that is based on annealing in metallurgy, a technique involving heating and controlled cooling of a # material to increase the size of its crystals and reduce their defects, both are attributes of the material # that depend on its thermodynamic free energy. train = TrainAnneal() train.display_iteration = True train.train(x0, score_funct) # Display the final validation. We show all of the iris data as well as the predicted species. for i in range(0, len(training_input)): input_data = training_input[i] # Compute the output from the RBF network output_data = network.compute_regression(input_data) ideal_data = training_ideal[i] # Decode the three output neurons into a class number. 
class_id = norm.denorm_one_of_n(output_data) print(str(input_data) + " -> " + inv_classes[class_id] + ", Ideal: " + ideal_species[i]) ``` It is often used when the search space is discrete (e.g., all tours that visit a given set of cities). For problems where finding an approximate global optimum is more important than finding a precise local optimum in a fixed amount of time, simulated annealing may be preferable to alternatives such as gradient descent. ## Assignments Given the RBFN API, please follow the next steps to train an RBF to classify the Iris dataset. ``` # Perform the simulated annealing. # Display the final validation. We show all of the iris data as well as the predicted species. # Compute the output from the RBF network # Decode the three output neurons into a class number and print it ``` # Vector Quantization Vector quantization (VQ) is a form of competitive learning. Such an algorithm is able to discover structure in the input data. Generally speaking, vector quantization is a form of lossy data compression—lossy in the sense that some information contained in the input data is lost as a result of the compression. ![title](img/vq_alg.png) An input data point belongs to a certain class if its position (in the 2D space) is closest to the class prototype, fulfilling the Voronoi partitioning (i.e. partitioning of a plane into regions based on distance to points in a specific subset of the plane). ![title](img/vq.png) In a typical scenario, such behavior can be implemented with a neural network that consists of two layers—an input layer and a competitive layer with lateral inhibition. The input layer receives the available data. The competitive layer consists of neurons that compete with each other. ![title](img/vq_net.png) The classic image-processing example of quantizing an 8-bit grayscale image (traditionally the 512 x 512 Lena image; the code below uses SciPy's built-in `face` sample instead) is used here to illustrate how `k`-means is used for vector quantization. ``` import numpy as np import scipy as sp import matplotlib.pyplot as plt from sklearn import cluster from sklearn.utils.testing import SkipTest from sklearn.utils.fixes import sp_version try: face = sp.face(gray=True) except AttributeError: # Newer versions of scipy have face in misc from scipy import misc face = misc.face(gray=True) n_clusters = 5 np.random.seed(0) X = face.reshape((-1, 1)) # We need an (n_sample, n_feature) array k_means = cluster.KMeans(n_clusters=n_clusters, n_init=4) k_means.fit(X) values = k_means.cluster_centers_.squeeze() labels = k_means.labels_ # create an array from labels and values face_compressed = np.choose(labels, values) face_compressed.shape = face.shape vmin = face.min() vmax = face.max() ``` Plot the results of the clustering: the original image, the quantized image, and the histogram.
``` # original face plt.figure(1, figsize=(3, 2.2)) plt.imshow(face, cmap=plt.cm.gray, vmin=vmin, vmax=256) # compressed face plt.figure(2, figsize=(3, 2.2)) plt.imshow(face_compressed, cmap=plt.cm.gray, vmin=vmin, vmax=vmax) # equal bins face regular_values = np.linspace(0, 256, n_clusters + 1) regular_labels = np.searchsorted(regular_values, face) - 1 regular_values = .5 * (regular_values[1:] + regular_values[:-1]) # mean regular_face = np.choose(regular_labels.ravel(), regular_values, mode="clip") regular_face.shape = face.shape plt.figure(3, figsize=(3, 2.2)) plt.imshow(regular_face, cmap=plt.cm.gray, vmin=vmin, vmax=vmax) # histogram plt.figure(4, figsize=(3, 2.2)) plt.clf() plt.axes([.01, .01, .98, .98]) plt.hist(X, bins=256, color='.5', edgecolor='.5') plt.yticks(()) plt.xticks(regular_values) values = np.sort(values) for center_1, center_2 in zip(values[:-1], values[1:]): plt.axvline(.5 * (center_1 + center_2), color='b') for center_1, center_2 in zip(regular_values[:-1], regular_values[1:]): plt.axvline(.5 * (center_1 + center_2), color='b', linestyle='--') plt.show() ``` ## Assignments In this problem you should group 2d input points (x,y) into clusters and determine the center of each cluster. The number of required clusters is provided as integer number on the first line. Following, the system provides an unknown number of 2d input data points (x, y), one per line. Continue reading until your program obtains no more data. You can safely assume to read less than 1000 points. After reading, you should run the Vector Quantization algorithm to find the center(s) of input data, and finally report the center position as x, y coordinate. Present one such center position per output line. The order of center points output does not matter. 3 cluster VQ ![title](img/vq_3clust.png) ``` # load the datasets for training and testing import numpy as np import csv with open('./data/vq_3clust_in.txt') as inputfile: train_data = list(csv.reader(inputfile)) with open('./data/vq_3clust_out.txt') as inputfile: test_data = list(csv.reader(inputfile)) # add network code here ``` 6 cluster VQ ![title](img/vq_3clust.png) ``` # load the datasets for training and testing for the 6 cluster example import numpy as np import csv with open('./data/vq_6clust_in.txt') as inputfile: train_data = list(csv.reader(inputfile)) with open('./data/vq_6clust_out.txt') as inputfile: test_data = list(csv.reader(inputfile)) # add network code here ``` # Self-Organizing Maps In neurobiology, during neural growth, synapses are strengthened or weakened, in a process usually modelled as a competition for resources. In such a learning process, there is a competition between the neurons to fire. More precisely, neurons compete with each other (in accordance with a learning rule) for the “opportunity” to respond to features contained in the input data. ![title](img/som.png) In its simplest form, such behaviour describes a “winner-takes-all” strategy. In such a strategy, the neuron with the greatest total input “wins” the competition and turns on; all the other neurons in the network then switch off. The aim of such learning mechanisms is to cluster the data. ![title](img/som_tr.png) Kohonen’s self-organizing map (SOM) is one of the most popular unsupervised neural network models. Developed for an associative memory model, it is an unsupervised learning algorithm with a simple structure and computational form, and is motivated by the retina-cortex mapping. 
The SOM can provide topologically preserved mapping from input to output spaces, such that “nearby” sensory stimuli are represented in “nearby” regions. ![title](img/som_alg.png) ``` # Class implementing a basic SOM import scipy.spatial import numpy as np import scipy as sp import sys class SelfOrganizingMap: """ The weights of the output neurons base on the input from the input neurons. """ def __init__(self, input_count, output_count): """ The constructor. :param input_count: Number of input neurons :param output_count: Number of output neurons :return: """ self.input_count = input_count self.output_count = output_count self.weights = np.zeros([self.output_count, self.input_count]) self.distance = sp.spatial.distance.euclidean def calculate_error(self, data): bmu = BestMatchingUnit(self) bmu.reset() # Determine the BMU for each training element. for input in data: bmu.calculate_bmu(input) # update the error return bmu.worst_distance / 100.0 def classify(self, input): if len(input) > self.input_count: raise Exception("Can't classify SOM with input size of {} " "with input data of count {}".format(self.input_count, len(input))) min_dist = sys.maxfloat result = -1 for i in range(self.output_count): dist = self.distance.calculate(input, self.weights[i]) if dist < min_dist: min_dist = dist result = i return result def reset(self): self.weights = (np.random.rand(self.weights.shape[0], self.weights.shape[1]) * 2.0) - 1 ``` The "Best Matching Unit" or BMU is a very important concept in the training for a SOM. The BMU is the output neuron that has weight connections to the input neurons that most closely match the current input vector. This neuron (and its "neighborhood") are the neurons that will receive training. ``` # Class implementing the competition stage in SOM, finding the best matching unit. class BestMatchingUnit: """ This class also tracks the worst distance (of all BMU's). This gives some indication of how well the network is trained, and thus becomes the "error" of the entire network. """ def __init__(self, som): """ Construct a BestMatchingUnit class. The training class must be provided. :param som: The SOM to evaluate. """ # The owner of this class. self.som = som # What is the worst BMU distance so far, this becomes the error for the # entire SOM. self.worst_distance = 0 def calculate_bmu(self, input): """ Calculate the best matching unit (BMU). This is the output neuron that has the lowest Euclidean distance to the input vector. :param input: The input vector. :return: The output neuron number that is the BMU. """ result = 0 if len(input) > self.som.input_count: raise Exception( "Can't train SOM with input size of {} with input data of count {}.".format(self.som.input_count, len(input))) # Track the lowest distance so far. lowest_distance = float("inf") for i in range(self.som.output_count): distance = self.calculate_euclidean_distance(self.som.weights, input, i) # Track the lowest distance, this is the BMU. if distance < lowest_distance: lowest_distance = distance result = i # Track the worst distance, this is the error for the entire network. if lowest_distance > self.worst_distance: self.worst_distance = lowest_distance return result def calculate_euclidean_distance(self, matrix, input, output_neuron): """ Calculate the Euclidean distance for the specified output neuron and the input vector. This is the square root of the squares of the differences between the weight and input vectors. :param matrix: The matrix to get the weights from. :param input: The input vector. 
:param outputNeuron: The neuron we are calculating the distance for. :return: The Euclidean distance. """ result = 0 # Loop over all input data. diff = input - matrix[output_neuron] return np.sqrt(sum(diff*diff)) ``` In the next section we analyze competitive training, which would be used in a winner-take-all neural network, such as the self organizing map (SOM). This is an unsupervised training method, no ideal data is needed on the training set. If ideal data is provided, it will be ignored. Training is done by looping over all of the training elements and calculating a "best matching unit" (BMU). This BMU output neuron is then adjusted to better "learn" this pattern. Additionally, this training may be applied to other "nearby" output neurons. The degree to which nearby neurons are update is defined by the neighborhood function. A neighborhood function is required to determine the degree to which neighboring neurons (to the winning neuron) are updated by each training iteration. Because this is unsupervised training, calculating an error to measure progress by is difficult. The error is defined to be the "worst", or longest, Euclidean distance of any of the BMU's. This value should be minimized, as learning progresses. ``` # Class implementing the basic training algorithm for a SOM class BasicTrainSOM: """ Because only the BMU neuron and its close neighbors are updated, you can end up with some output neurons that learn nothing. By default these neurons are not forced to win patterns that are not represented well. This spreads out the workload among all output neurons. This feature is not used by default, but can be enabled by setting the "forceWinner" property. """ def __init__(self, network, learning_rate, training, neighborhood): # The neighborhood function to use to determine to what degree a neuron # should be "trained". self.neighborhood = neighborhood # The learning rate. To what degree should changes be applied. self.learning_rate = learning_rate # The network being trained. self.network = network # How many neurons in the input layer. self.input_neuron_count = network.input_count # How many neurons in the output layer. self.output_neuron_count = network.output_count # Utility class used to determine the BMU. self.bmu_util = BestMatchingUnit(network) # Correction matrix. self.correction_matrix = np.zeros([network.output_count, network.input_count]) # True is a winner is to be forced, see class description, or forceWinners # method. By default, this is true. self.force_winner = False # When used with autodecay, this is the starting learning rate. self.start_rate = 0 # When used with autodecay, this is the ending learning rate. self.end_rate = 0 # When used with autodecay, this is the starting radius. self.start_radius = 0 # When used with autodecay, this is the ending radius. self.end_radius = 0 # This is the current autodecay learning rate. self.auto_decay_rate = 0 # This is the current autodecay radius. self.auto_decay_radius = 0 # The current radius. self.radius = 0 # Training data. self.training = training def _apply_correction(self): """ Loop over the synapses to be trained and apply any corrections that were determined by this training iteration. """ np.copyto(self.network.weights, self.correction_matrix) def auto_decay(self): """ Should be called each iteration if autodecay is desired. 
""" if self.radius > self.end_radius: self.radius += self.auto_decay_radius if self.learning_rate > self.end_rate: self.learning_rate += self.auto_decay_rate self.neighborhood.radius = self.radius def copy_input_pattern(self, matrix, output_neuron, input): """ Copy the specified input pattern to the weight matrix. This causes an output neuron to learn this pattern "exactly". This is useful when a winner is to be forced. :param matrix: The matrix that is the target of the copy. :param output_neuron: The output neuron to set. :param input: The input pattern to copy. """ matrix[output_neuron, :] = input def decay(self, decay_rate, decay_radius): """ Decay the learning rate and radius by the specified amount. :param decay_rate: The percent to decay the learning rate by. :param decay_radius: The percent to decay the radius by. """ self.radius *= (1.0 - decay_radius) self.learning_rate *= (1.0 - decay_rate) self.neighborhood.radius = self.radius def _determine_new_weight(self, weight, input, currentNeuron, bmu): """ Determine the weight adjustment for a single neuron during a training iteration. :param weight: The starting weight. :param input: The input to this neuron. :param currentNeuron: The neuron who's weight is being updated. :param bmu: The neuron that "won", the best matching unit. :return: The new weight value. """ return weight \ + (self.neighborhood.fn(currentNeuron, bmu) \ * self.learning_rate * (input - weight)) def _force_winners(self, matrix, won, least_represented): """ Force any neurons that did not win to off-load patterns from overworked neurons. :param matrix: An array that specifies how many times each output neuron has "won". :param won: The training pattern that is the least represented by this neural network. :param least_represented: The synapse to modify. :return: True if a winner was forced. """ max_activation = float("-inf") max_activation_neuron = -1 output = self.compute(self.network, self.least_represented) # Loop over all of the output neurons. Consider any neurons that were # not the BMU (winner) for any pattern. Track which of these # non-winning neurons had the highest activation. for output_neuron in range(len(won)): # Only consider neurons that did not "win". if won[output_neuron] == 0: if (max_activation_neuron == -1) \ or (output[output_neuron] > max_activation): max_activation = output[output_neuron] max_activation_neuron = output_neuron # If a neurons was found that did not activate for any patterns, then # force it to "win" the least represented pattern. if max_activation_neuron != -1: self.copy_input_pattern(matrix, max_activation_neuron, least_represented) return True else: return False def iteration(self): """ Perform one training iteration. """ # Reset the BMU and begin this iteration. self.bmu_util.reset() won = [0] * self.output_neuron_count least_represented_activation = float("inf") least_represented = None # Reset the correction matrix for this synapse and iteration. self.correctionMatrix.clear() # Determine the BMU for each training element. for input in self.training: bmu = self.bmu_util.calculate_bmu(input) won[bmu] += 1 # If we are to force a winner each time, then track how many # times each output neuron becomes the BMU (winner). if self.force_winner: # Get the "output" from the network for this pattern. This # gets the activation level of the BMU. output = self.compute(self.network, input) # Track which training entry produces the least BMU. This # pattern is the least represented by the network. 
if output[bmu] < least_represented_activation: least_represented_activation = output[bmu] least_represented = input.getInput() self.train(bmu, self.network.getWeights(), input.getInput()) if self.force_winner: # force any non-winning neurons to share the burden somewhat if not self.force_winners(self.network.weights, won, least_represented): self.apply_correction() else: self.apply_correction() def set_auto_decay(self, planned_iterations, start_rate, end_rate, start_radius, end_radius): """ Setup autodecay. This will decrease the radius and learning rate from the start values to the end values. :param planned_iterations: The number of iterations that are planned. This allows the decay rate to be determined. :param start_rate: The starting learning rate. :param end_rate: The ending learning rate. :param start_radius: The starting radius. :param end_radius: The ending radius. """ self.start_rate = start_rate self.end_rate = end_rate self.start_radius = start_radius self.end_radius = end_radius self.auto_decay_radius = (end_radius - start_radius) / planned_iterations self.auto_decay_rate = (end_rate - start_rate) / planned_iterations self.set_params(self.start_rate, self.start_radius) def set_params(self, rate, radius): """ Set the learning rate and radius. :param rate: The new learning rate. :param radius: :return: The new radius. """ self.radius = radius self.learning_rate = rate self.neighborhood.radius = radius def get_status(self): """ :return: A string display of the status. """ result = "Rate=" result += str(self.learning_rate) result += ", Radius=" result += str(self.radius) return result def _train(self, bmu, matrix, input): """ Train for the specified synapse and BMU. :param bmu: The best matching unit for this input. :param matrix: The synapse to train. :param input: The input to train for. :return: """ # adjust the weight for the BMU and its neighborhood for output_neuron in range(self.output_neuron_count): self._train_pattern(matrix, input, output_neuron, bmu) def _train_pattern(self, matrix, input, current, bmu): """ Train for the specified pattern. :param matrix: The synapse to train. :param input: The input pattern to train for. :param current: The current output neuron being trained. :param bmu: The best matching unit, or winning output neuron. """ for input_neuron in range(self.input_neuron_count): current_weight = matrix[current][input_neuron] input_value = input[input_neuron] new_weight = self._determine_new_weight(current_weight, input_value, current, bmu) self.correction_matrix[current][input_neuron] = new_weight def train_single_pattern(self, pattern): """ Train the specified pattern. Find a winning neuron and adjust all neurons according to the neighborhood function. :param pattern: The pattern to train. """ bmu = self.bmu_util.calculate_bmu(pattern) self._train(bmu, self.network.weights, pattern) self._apply_correction() def compute(self, som, input): """ Calculate the output of the SOM, for each output neuron. Typically, you will use the classify method instead of calling this method. :param som: The input pattern. :param input: The output activation of each output neuron. 
:return: """ result = np.zeros(som.output_count) for i in range(som.output_count): optr = som.weights[i] matrix_a = np.zeros([input.length,1]) for j in range(len(input)): matrix_a[0][j] = input[j] matrix_b = np.zeros(1,input.length) for j in range(len(optr)): matrix_b[0][j] = optr[j] result[i] = np.dot(matrix_a, matrix_b) return result ``` A common example used to help teach the principals behind SOMs is the mapping of colours from their three dimensional components - red, green and blue, into two dimensions.The colours are presented to the network as 3D vectors - one dimension for each of the colour components (RGB encoding) - and the network learns to represent them in the 2D space we can see. Notice that in addition to clustering the colours into distinct regions, regions of similar properties are usually found adjacent to each other. ``` import os import sys from Tkinter import * import numpy as np from neighborhood import * TILES_WIDTH = 50 TILES_HEIGHT = 50 TILE_SCREEN_SIZE = 10 class DisplayColors: def __init__(self,root,samples): # Build the grid display canvas_width = TILES_WIDTH * TILE_SCREEN_SIZE canvas_height = TILES_HEIGHT * TILE_SCREEN_SIZE self.samples = samples self.root = root self.c = Canvas(self.root,width=canvas_width, height=canvas_height) self.c.pack() self.grid_rects = [[None for j in range(TILES_WIDTH)] for i in range(TILES_HEIGHT)] for row in range(TILES_HEIGHT): for col in range(TILES_WIDTH): x = col * TILE_SCREEN_SIZE y = row * TILE_SCREEN_SIZE r = self.c.create_rectangle(x, y, x+TILE_SCREEN_SIZE,y+TILE_SCREEN_SIZE, fill="white") self.grid_rects[row][col] = r self.som = SelfOrganizingMap(3,TILES_WIDTH * TILES_HEIGHT) self.som.reset() self.gaussian = NeighborhoodRBF(NeighborhoodRBF.TYPE_GAUSSIAN,[TILES_WIDTH,TILES_HEIGHT]) self.train = BasicTrainSOM(self.som, 0.01, None, self.gaussian) self.train.force_winner = False self.train.set_auto_decay(1000, 0.8, 0.003, 30, 5) self.iteration = 1 def RGBToHTMLColor(self, rgb_tuple): hexcolor = '#%02x%02x%02x' % rgb_tuple return hexcolor def convert_color(self, d): result = 128*d result+= 128 result = min(result, 255) result = max(result, 0) return result def update(self, som): for row in range(TILES_HEIGHT): for col in range(TILES_WIDTH): index = (row*TILES_WIDTH)+col color = ( self.convert_color(som.weights[index][0]), self.convert_color(som.weights[index][1]), self.convert_color(som.weights[index][2])) r = self.grid_rects[row][col] self.c.itemconfig(r, fill=self.RGBToHTMLColor(color)) self.c.itemconfig(r, outline=self.RGBToHTMLColor(color)) def update_clock(self): idx = np.random.randint(len(samples)) c = self.samples[idx] self.train.train_single_pattern(c) self.train.auto_decay() self.update(self.som) print("Iteration {}, {}".format(self.iteration,self.train.get_status())) self.iteration+=1 if self.iteration<=1000: self.root.after(1, self.update_clock) samples = np.zeros([15,3]) for i in range(15): samples[i][0] = np.random.uniform(-1,1) samples[i][1] = np.random.uniform(-1,1) samples[i][2] = np.random.uniform(-1,1) root = Tk() display = DisplayColors(root, samples) display.update_clock() root.mainloop() ``` # Asignments In this assignment a solution path for the Traveling Salesman Problem (finding a short path to travel once to each city and return home), for an unknown number of cities as input (you can safely assume <= 1000 cities). Each city consists of an ID (an integer number), and X and Y position of that city (two integer numbers). The provided input format for each line to read in is CITY-ID,X,Y\n. 
Your program shall implement a Self-Organizing Map to accomplish this task. When your SOM finished learning, print the path as one city-id per line, followed by '\n'. Example for three cities with IDs 1,2,3 which are visited in the order 3,1,2: 3\n 1\n 2\n Remember that the number of cities in the output corresponds exactly to the number of cities in the input. It does not matter which of the cities is the first on your path. You can safely assume that your program does not need to find the shortest possible path (remember, this problem is NP hard!), but your result needs to be within 15% of the shortest path we found (which again might not be optimal). A travelling salesmap across Europe :) ![title](img/som_ts_eu.png) ``` # load the datasets for training and testing for TS in Europe import numpy as np import csv with open('./data/som_ts_in.txt') as inputfile: train_data = list(csv.reader(inputfile)) with open('./data/som_ts_out.txt') as inputfile: test_data = list(csv.reader(inputfile)) # add network code here ``` And for a more complex example, consider a more restricted dataset. ![title](img/som_ts_random.png) ``` # load the datasets for training and testing for TS import numpy as np import csv with open('./data/som_ts_in_aux.txt') as inputfile: train_data = list(csv.reader(inputfile)) with open('./data/som_ts_out_aux.txt') as inputfile: test_data = list(csv.reader(inputfile)) # add network code here ``` # Hopfield Networks Donald Hebb hypothesized in 1949 how neurons are connected with each other in the brain: “When an axon of cell A is near enough to excite a cell B and repeatedly or persistently takes part in firing it, some growth process or metabolic change takes place in one or both cells such that A’s efficiency, as one of the cells firing B, is increased.”, and postulated a new learning mechanism, Hebbian learning. In other words neural networks stores and retrieves associations, which are learned as synaptic connection. In Hebbian learning, both presynaptic and postsynaptic neurons are involved. Human memory thus works in an associative or content-addressable way. The model is a recurrent neural network with fully interconnected neurons. The number of feedback loops is equal to the number of neurons. Basically, the output of each neuron is fed back, via a unit-time delay element, to each of the other neurons in the network. ![title](img/hopfield.png) Such a structure allows the network to recognise any of the learned patterns by exposure to only partial or even some corrupted information about that pattern, i.e., it eventually settles down and returns the closest pattern or the best guess. ``` # Class implementing a Hopfield Network import numpy as np from energetic import EnergeticNetwork class HopfieldNetwork(EnergeticNetwork): def __init__(self, neuron_count): EnergeticNetwork.__init__(self, neuron_count) self.input_count = neuron_count self.output_count = neuron_count self.activation_function = lambda d: 1 if (d > 0) else 0 def compute(self, input): """ Note: for Hopfield networks, you will usually want to call the "run" method to compute the output. This method can be used to copy the input data to the current state. A single iteration is then run, and the new current state is returned. :param input: The input pattern. :return: The new current state. 
""" result = self.current_state[:] self.run() for i in range(self.current_state): result[i] = self.activation_function(self.current_state[i]) self.current_state[:] = result return result def run(self): """ Perform one Hopfield iteration. """ for to_neuron in range(self.neuron_count): sum = 0 for from_neuron in range(self.neuron_count): sum += self.current_state[from_neuron] \ * self.get_weight(from_neuron, to_neuron) self.current_state[to_neuron] = self.activation_function(sum) def run_until_stable(self, max_cycle): """ Run the network until it becomes stable and does not change from more runs. :param max_cycle: The maximum number of cycles to run before giving up. :return: The number of cycles that were run. """ done = False last_state_str = str(self.current_state) current_state_str = last_state_str cycle = 0 while not done: self.run() cycle += 1 last_state_str = str(self.current_state) if last_state_str == current_state_str: if cycle > max_cycle: done = True else: done = True current_state_str = last_state_str return cycle def energy(self): t = 0 # Calculate first term a = 0 for i in range(self.input_count): for j in range(self.output_count): a += self.get_weight(i, j) * self.current_state[i] * self.current_state[j] a *= -0.5 # Calculate second term b = 0 for i in range(self.input_count): b += self.current_state[i] * t return a+b ``` In the next section we implement the Hopefield Network training algorithm ![title](img/hopfield_alg.png) ``` class TrainHopfieldHebbian: def __init__(self, network): self.network = network; self.sum_matrix = np.zeros([network.input_count, network.input_count]) self.pattern_count = 1 def add_pattern(self, pattern): for i in range(self.network.input_count): for j in range(self.network.input_count): if i == j: self.sum_matrix[i][j] = 0 else: self.sum_matrix[i][j] += pattern[i] * pattern[j] self.pattern_count += 1 def learn(self): if self.pattern_count == 0: raise Exception("Please add a pattern before learning. Nothing to learn.") for i in range(self.network.input_count): for j in range(self.network.input_count): self.network.set_weight(i, j, self.sum_matrix[i][j]/self.pattern_count) ``` In the following sample problem we will implement a Hopfield network to correct distorted patterns (here: 2D images). The algorithm reads a collection of binary images (5 patterns), each image being 10x10 "pixels" in size. A pixel may either be a space ' ' or a circle 'o'. We will train a Hopfield network (size 10x10 neurons) with these images as attractors. After training, the algorithm will read another small number of images with "distortions"; i.e. with incorrect pixel patterns compared to the previously trained images. For each such "distorted" image the algorithm shall output the closest training example. ``` # The neural network will learn these patterns. PATTERN = [[ "O O O O O ", " O O O O O", "O O O O O ", " O O O O O", "O O O O O ", " O O O O O", "O O O O O ", " O O O O O", "O O O O O ", " O O O O O"], [ "OO OO OO", "OO OO OO", " OO OO ", " OO OO ", "OO OO OO", "OO OO OO", " OO OO ", " OO OO ", "OO OO OO", "OO OO OO" ], [ "OOOOO ", "OOOOO ", "OOOOO ", "OOOOO ", "OOOOO ", " OOOOO", " OOOOO", " OOOOO", " OOOOO", " OOOOO" ], [ "O O O O", " O O O ", " O O O ", "O O O O", " O O O ", " O O O ", "O O O O", " O O O ", " O O O ", "O O O O" ], [ "OOOOOOOOOO", "O O", "O OOOOOO O", "O O O O", "O O OO O O", "O O OO O O", "O O O O", "O OOOOOO O", "O O", "OOOOOOOOOO" ]] # The neural network will be tested on these patterns, to see which of the last set they are the closest to. 
PATTERN2 = [[ " ", " ", " ", " ", " ", " O O O O O", "O O O O O ", " O O O O O", "O O O O O ", " O O O O O"], ["OOO O O", " O OOO OO", " O O OO O", " OOO O ", "OO O OOO", " O OOO O", "O OO O O", " O OOO ", "OO OOO O ", " O O OOO"], ["OOOOO ", "O O OOO ", "O O OOO ", "O O OOO ", "OOOOO ", " OOOOO", " OOO O O", " OOO O O", " OOO O O", " OOOOO"], ["O OOOO O", "OO OOOO ", "OOO OOOO ", "OOOO OOOO", " OOOO OOO", " OOOO OO", "O OOOO O", "OO OOOO ", "OOO OOOO ", "OOOO OOOO"], ["OOOOOOOOOO", "O O", "O O", "O O", "O OO O", "O OO O", "O O", "O O", "O O", "OOOOOOOOOO"]] ``` Convert the image representation into a bipolar {-1/1} representation and display according to the original patterns ``` # Size of the network HEIGHT = 10 WIDTH = 10 def convert_pattern(data, index): result_index = 0 result = np.zeros([WIDTH*HEIGHT]) for row in range(HEIGHT): for col in range(WIDTH): ch = data[index][row][col] result[result_index] = 1 if ch != ' ' else -1 result_index += 1 return result def display(pattern1, pattern2): index1 = 0 index2 = 0 for row in range(HEIGHT): line = "" for col in range(WIDTH): if pattern1[index1]>0: line += "O" else: line += " " index1 += 1 line += " -> " for col in range(WIDTH): if pattern2[index2] >0 : line += "O" else: line += " " index2 += 1 print(line) def display_data(pattern1): index1 = 0 index2 = 0 for row in range(HEIGHT): line = "" for col in range(WIDTH): if pattern1[index1]>0: line += "O" else: line += " " index1 += 1 print(line) # Evaluate the network for the provided patterns, using a number of N steps of convergence N = 10 def evaluate(hopfield, pattern): for i in range(len(pattern)): print 'Convergence for pattern %d \n' % i pattern1 = convert_pattern(pattern, i) print 'input\n' display_data(pattern1) hopfield.current_state = pattern1 cycles = hopfield.run_until_stable(N) pattern2 = hopfield.current_state print 'attractor\n' display_data(pattern2) print("----------------------") # Create the network and train it on the first set of patterns and evaluate for both datasets (i.e. one correct and one distorted) hopfield = HopfieldNetwork(WIDTH*HEIGHT) train = TrainHopfieldHebbian(hopfield) for i in range(len(PATTERN)): train.add_pattern(convert_pattern(PATTERN, i)) train.learn() print("Evaluate distorted patterns\n") evaluate(hopfield, PATTERN2) ``` In the application of the Hopfield network as a content-addressable memory, we know a priori the fixed points (attractors) of the network in that they correspond to the patterns to be stored. However, the synaptic weights of the network that produce the desired fixed points are unknown, and the problem is how to determine them. The primary function of a content-addressable memory is to retrieve a pattern (item) stored in memory in response to the presentation of an incomplete or noisy version of that pattern. ![title](img/hopfield_energy.png) # Assignments For this assignment you should develop a Hopfield Network capable of learning a phonebook. More precisely, a simple autoassociative memory to recover names and phone numbers and/or match them. Assuming that this is the phonebook extract the network needs to learn: Code a Hopfield Network for phonebook learning and restoring using its Content-Addressable-Memory behavior. Simulate network for distorted numbers. The data is represented as: Input | Output Name -> Number TINA -> ? 86'GV | TINA -> 6843726 ANTJE -> ?Z!ES-= | ANTJE -> 8034673 LISA -> JK#XMG | LISA -> 7260915 ``` # add code here ``` Simulate network for distorted name. 
The data is represented as: Input | Output Number -> Name 6843726 -> ; 01, | 6843726 -> TINA 8034673 -> &;A$T | 8034673 -> ANTJE 7260915 -> N";SE | 7260915 -> LISA ``` # add code here ``` Simulate network for distorted names and numbers. The data is represented as: Input | Output Name -> Number TINE -> 1F&KV]: | TINA -> 6843726 ANNJE -> %VZAQ$> | ANTJE -> 8034673 RITA -> [)@)EK& | DIVA -> 6060737 ``` # add code here ```
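As a starting point for the phonebook exercises above, here is a minimal sketch. It assumes the `HopfieldNetwork` and `TrainHopfieldHebbian` classes defined earlier in this notebook (and therefore the local `energetic` module they build on); the fixed-width, byte-per-character bipolar encoding and the helper names `PHONEBOOK`, `encode`, and `decode` are illustrative assumptions rather than part of the assignment, and recall quality will depend on the encoding chosen. The same pattern (encode, train with Hebbian learning, clamp the known field, run until stable, decode) can be adapted for the distorted-name and distorted-name-and-number cases.

```
# Sketch for the phonebook exercise, using the HopfieldNetwork and
# TrainHopfieldHebbian classes defined earlier in this notebook.
# The fixed-width ASCII-to-bipolar encoding below is an illustrative assumption.
import numpy as np

PHONEBOOK = {"TINA": "6843726", "ANTJE": "8034673", "LISA": "7260915"}
NAME_W, NUM_W = 6, 7          # fixed field widths: padded name + 7-digit number
BITS = 8                      # bits per character
N = (NAME_W + NUM_W) * BITS   # number of neurons

def encode(name, number):
    """Encode a padded 'name + number' string as a bipolar {-1, +1} vector."""
    text = name.ljust(NAME_W) + number.ljust(NUM_W)
    bits = []
    for ch in text:
        bits += [1 if (ord(ch) >> b) & 1 else -1 for b in range(BITS)]
    return np.array(bits)

def decode(state):
    """Decode a state vector back into (name, number); any unit > 0 counts as 'on'."""
    chars = []
    for i in range(0, len(state), BITS):
        byte = sum(1 << b for b in range(BITS) if state[i + b] > 0)
        chars.append(chr(byte) if 32 <= byte < 127 else "?")
    text = "".join(chars)
    return text[:NAME_W].strip(), text[NAME_W:].strip()

# Store the phonebook entries as attractors via Hebbian learning
hopfield = HopfieldNetwork(N)
train = TrainHopfieldHebbian(hopfield)
for name, number in PHONEBOOK.items():
    train.add_pattern(encode(name, number))
train.learn()

# Recall: present a name with a distorted number field and let the network settle
for name in PHONEBOOK:
    hopfield.current_state = encode(name, "???????")   # distorted number part
    hopfield.run_until_stable(10)
    print(name + " -> " + str(decode(hopfield.current_state)))
```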
github_jupyter
# 2021-05-10 Daily Practice - [x] Practice - [ ] SQL - [x] Algorithms - [ ] Solve + Design - [ ] Learn - [ ] Write - [ ] Build --- ## Practice - [x] https://leetcode.com/problems/reverse-integer/ - [x] https://leetcode.com/problems/longest-common-prefix/ - [x] https://leetcode.com/problems/maximum-subarray/ - [x] https://leetcode.com/problems/same-tree/ - [x] https://leetcode.com/problems/combination-sum/ - [x] https://leetcode.com/problems/longest-substring-without-repeating-characters/ ### Problem solving process [CSDojo problem solving tips](https://www.youtube.com/watch?v=GBuHSRDGZBY) 1. Brute-force solution 2. Think of a simpler version of the problem 3. Think with simpler examples: look for patterns 4. Use some visualization 5. Test solution on a other examples #### Problem Given two arrays of the same length, find the pair(s) of values with sums closest to the target. ``` arr1 = [-1, 3, 8, 2, 9, 5] arr2 = [4, 1, 2, 10, 5, 20] tgt = 24 # Brute-force iterative approach - O(n^2) # Iterate through every pair of elements to find the closest def find_closest_sum(arr1, arr2, tgt): closest = tgt # Can't be further away than the target itself? closest_sums = [] for i, v1 in enumerate(arr1): for j, v2 in enumerate(arr2): if abs(tgt - (v1 + v2)) <= closest: closest = tgt - (v1 + v2) closest_sums.append((v1, v2)) return closest, closest_sums find_closest_sum(arr1, arr2, tgt) # Simpler version of the problem - target sum pair exists arr3 = [-1, 3, 8, 2, 9, 4] arr4 = [4, 1, 2, 10, 5, 20] tgt2 = 24 # Use a set to check for differences def find_closest_sum(arr1, arr2, tgt): set1 = set(arr1) # Create set from first array pairs = [] for j, v2 in enumerate(arr2): # Iterate through second array # Check if target minus element is in set if (tgt - v2) in set1: pairs.append((tgt - v2, v2)) return pairs find_closest_sum(arr3, arr4, tgt) ``` Once the simpler version of the problem (where a pair exists that add up to the target) is solved, expand that solution to include any other cases that need to be accounted for (arrays without a pair that add up to the target). In this problem, if the target is not found, add or subtract 1 from the target and try again. Repeat until pair is found. > Think with simpler examples: try noticing a pattern ``` # Sorting the arrays first; start at the top of first array def find_closest_sum(arr1, arr2, tgt): arr1s, arr2s = sorted(arr1), sorted(arr2) # First pair is (arr1s[-1], arr2s[1]) # Increment second array's index # If sum is less than target, increment second array's index # If sum is more than target, decrement first array's index # if sum equals target, solution is found # Otherwise, keep track of closest pairs and return closest one after iteration is complete ``` ### Reverse integer On [LeetCode](https://leetcode.com/problems/reverse-integer/) Given a signed 32-bit integer `x`, return `x` with its digits reversed. If reversing `x` causes the value to go outside the signed 32-bit integer range ``[-2^31, 2^31 - 1]``, then return `0`. Assume the environment does not allow you to store 64-bit integers (signed or unsigned). 
``` # Get components of integer 201 -> 102 # Modulo of 10 will return the ten factor - rightmost number 201 % 10 -> 1 # Remove that digit from the integer by floor division 201 // 10 -> 20 # 20 is going to be fed back into function; repeat steps above 20 % 10 -> 0 20 // 10 -> 2 # Base case: 2 % 10 = 2 # Then return that number # Reconstruct from right to left 2 * (0 + 10**0) 123 -> 321 123 % 10 = 3 123 // 10 = 12 12 % 10 = 2 12 // 10 = 1 1 % 10 = 1 # base case, return 1 1 + (2 * 10**1) = 21 21 + (3 * 10**2) = 21 + 300 = 321 import math def reverse(x): # Deal with negative case? neg = 1 if x < 0: neg = -1 x *= neg # Base case: mod 10 of x = x if x % 10 == x: return x # "Pop" rightmost number off of x right = x % 10 x_new = x // 10 # Get factor of x_new to use as exponent below factor = int(math.log(x_new, 10)) + 1 # Feed remainder back into function and reconstruct right to left rev = (reverse(x_new) + (right * 10**factor)) * neg if 2**31 < rev or rev < (-1 * (2**31)) - 1: return 0 else: return rev reverse(123) reverse(-123) int(math.log(21, 10)) int(math.log(211, 10)) ``` ### Longest common prefix On [LeetCode](https://leetcode.com/problems/longest-common-prefix/) Write a function to find the longest common prefix string amongst an array of strings. If there is no common prefix, return an empty string "". - Implement a trie - Insert words into trie - DFS for node that has multiple children ``` class TrieNode: """Node of a trie.""" def __init__(self, char: str): self.char = char # Character held by this node self.is_end = False # End of word self.children = {} # Children: key is char, value is node class Trie: """A trie object.""" def __init__(self): """Instantiate the tree with blank root node.""" self.root = TrieNode("") def insert(self, word: str) -> None: """Inserts a word into the trie; each char is a node.""" prev_node = self.root # Start at root for char in word: # Iterate through chars in word # Check if char is already a child of prev_node if char in prev_node.children: # If already exists, iterate to next char prev_node = prev_node.children[char] else: # If not, instantiate node with char; add as child to prev_node new_node = TrieNode(char) prev_node.children[char] = new_node prev_node = new_node prev_node.is_end = True # Mark end of word, in case word itself is prefix def longest_common_prefix(self, root: TrieNode): """Traverses the tree to find longest common prefix of inserted words.""" # Base case: node has multiple children or end of word -> return node.char if len(root.children) > 1 or root.is_end is True: return root.char # Recursive case: concat cur node's char with return of recursive call child = root.children[list(root.children)[0]] # Get child node return root.char + self.longest_common_prefix(child) from typing import List def longestCommonPrefix(strs: List[str]) -> str: trie = Trie() # Instantiate a trie # Loop through words, inserting them into trie for word in strs: trie.insert(word) # Call longest_common_prefix to find prefix return trie.longest_common_prefix(trie.root) longestCommonPrefix(["flower","flow","flight"]) ``` ### Max Subarray On [LeetCode](https://leetcode.com/problems/maximum-subarray/) Given an integer array nums, find the contiguous subarray (containing at least one number) which has the largest sum and return its sum. Example 1: Input: nums = [-2,1,-3,4,-1,2,1,-5,4] Output: 6 Explanation: [4,-1,2,1] has the largest sum = 6. 
Example 2: Input: nums = [1] Output: 1 Example 3: Input: nums = [5,4,-1,7,8] Output: 23 ``` def max_subarray(nums): # vars to hold subarray and max sum so far max_sum = None sub = [] for i, num in enumerate(nums): # iterate through nums # check if the current value is better than the highest sum of all possible combinations of previous values if num >= sum(sub) + num: # if it's better, clear out subarray and add current value sub = [num] else: # Otherwise, add num to running subarray sub.append(num) if max_sum is None: # Deal with negative items max_sum = sum(sub) if sum(sub) > max_sum or max_sum is None: # If running sum is greater, set new max max_sum = sum(sub) return max_sum nums = [-2,1,-3,4,-1,2,1,-5,4] print(max_subarray(nums)) print(max_subarray([1])) print(max_subarray([5,4,-1,7,8])) ``` ### Same Tree On [LeetCode](https://leetcode.com/problems/same-tree/) Given the roots of two binary trees p and q, write a function to check if they are the same or not. Two binary trees are considered the same if they are structurally identical, and the nodes have the same value. ``` class Solution: def preorderTraversal(self, node) -> list: # Base case: node is None if node is None: return [None] # Recursive case: [this node.val, pt(left.val), pt.right.val] return [node.val] + self.preorderTraversal(node.left) + self.preorderTraversal(node.right) def isSameTree(self, p: TreeNode, q: TreeNode) -> bool: """If output of traversal is equal, then they are the same.""" if self.preorderTraversal(p) == self.preorderTraversal(q): return True else: return False ``` ### Combination Sum (Again) ``` class Solution: def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]: valid_paths = [] self.pathSearch(candidates, 0, target, [], valid_paths) return valid_paths def pathSearch(self, candidates, start, target, path, valid_paths): # Base case: target / remainder less than 0 if target < 0: return # Base case: target = 0 -> path is valid if target == 0: valid_paths.append(path) return # Recursive case: iterate through candidates starting with start for i, cand in enumerate(candidates): path.append(cand) # Add current search node to path # Recurse self.pathSearch(candidates, i, target - cand, path, valid_paths) # Remove search node from path path.pop() class Solution: def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]: self.valid_paths = [] self.path = [] self.pathSearch(candidates, 0, target) return self.valid_paths def pathSearch(self, candidates, start, target): # Base case: target / remainder less than 0 if target < 0: return # Base case: target = 0 -> path is valid if target == 0: self.valid_paths.append(self.path) return # Recursive case: iterate through candidates starting with start for i, cand in enumerate(candidates): self.path.append(cand) # Add current search node to path self.pathSearch(candidates, i, target - cand) # Recurse # Remove search node from path self.path.pop() class Solution: def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]: paths = [] self.pathSearch(candidates, 0, target, [], paths) return paths def pathSearch(self, candidates, start, target, path, paths): # Base case: target / remainder less than 0 if target < 0: return # Base case: target = 0 -> path is valid if target == 0: paths.append(list(path)) return # Recursive case: iterate through candidates starting with start for i, cand in enumerate(candidates): path.append(cand) # Add current search node to path self.pathSearch(candidates[start:], i, target 
- cand, path, paths) # Recurse path.pop() class Solution: def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]: paths = [] self.pathSearch(candidates, target, [], paths) return paths def pathSearch(self, candidates, target, path, paths): # Base case: target / remainder less than 0 if target < 0: return # Base case: target = 0 -> path is valid if target == 0: paths.append(list(path)) return # Recursive case: iterate through candidates starting with start for i, cand in enumerate(candidates): path.append(cand) # Add current search node to path self.pathSearch(candidates[i:], target - cand, path, paths) # Recurse path.pop() candidates = [2, 3, 6, 7] sol = Solution() sol.combinationSum(candidates, 7) candidates = [2,3,5] sol = Solution() sol.combinationSum(candidates, 8) [2, 3, 3] is [3, 3, 2] ``` ## Data Structures Review ### LinkedList Singly linked list with recursive methods. ``` class LinkedListNode: def __init__(self, data=None, next=None): self.data = data self.next = next def append(self, data) -> None: if self.next is None: # Base case, no next node self.next = LinkedListNode(data) else: self.next.append(data) class LinkedList: def __init__(self, head=None): self.head = head def append(self, data) -> None: if self.head: self.head.append(data) else: self.head = LinkedListNode(data) a = LinkedListNode(1) my_ll = LinkedList(a) my_ll.append(2) my_ll.append(3) print(my_ll.head.data) print(my_ll.head.next.data) print(my_ll.head.next.next.data) ``` ### Queue FIFO! - Enqueue: constant time - `O(1)` - Dequeue: constant time - `O(1)` - Peek: constant time - `O(1)` - Space complexity = `O(n)` ``` class Queue: def __init__(self): self.front = None self.back = None def is_empty(self) -> bool: if self.front is None: return True else: return False def enqueue(self, data): new_node = LinkedListNode(data) if self.is_empty(): self.front = new_node else: self.back.next = new_node self.back = new_node # Send new node to back of queue def dequeue(self): """Remove node from front of list and return its value.""" if not self.is_empty(): # Check if queue is empty dq = self.front # Save current front of queue self.front = dq.next # Set next node as new front else: return None # Return None if queue is empty # Check if queue is empty after dequeue if self.is_empty(): self.back = None # Also clear out back return dq.data # Return old front's data def peek(self): if not self.is_empty(): return self.front.data ``` ### Stack LIFO! - Push: constant time - `O(1)` - Pop: constant time - `O(1)` - Peek: constant time - `O(1)` - Space complexity = `O(n)` ``` class Stack: def __init__(self): self.top = None def push(self, data): """Adds element to top of stack.""" new_node = LinkedListNode(data) new_node.next = self.top self.top = new_node def pop(self): """Removes element from top of stack and returns its value.""" if self.top: popped = self.top self.top = popped.next return popped.data else: return None def peek(self): """Return value of the stack's top element without removing it.""" peeked = None if self.top: peeked = self.top.data return peeked ``` ### Binary Search Tree First, I'm going to implement a BST from scratch, run DFS and BFS on it, then look for a good leetcode problem to apply it to. ``` import math # Perfect binary tree math # Given 127 nodes, what is the height? print(math.log(127 + 1, 2)) # Given height of 8, how many nodes does it have? 
print(2 ** 8 - 1) class BSTNode: def __init__(self, val: int): self.val = val self.left = None self.right = None def __str__(self): print(f"<({self.left})-({self.val})-({self.right})>") def insert(self, val) -> None: if val < self.val: if self.left is None: self.left = BSTNode(val) else: self.left.insert(val) if val > self.val: if self.right is None: self.right = BSTNode(val) else: self.right.insert(val) def search(self, tgt: int): if self.val == tgt: return self elif tgt < self.val: if self.left is None: return False else: return self.left.search(tgt) else: if self.right is None: return False else: return self.right.search(tgt) def min(self): # Find minimum by going all the way left if self.left is None: # Base case: no more left to go return self else: # Recursive case: call left node's min method return self.left.min() class BST: def __init__(self, root_val: int): self.root = BSTNode(root_val) def insert(self, val: int) -> None: self.root.insert(val) def search(self, val: int) -> BSTNode: return self.root.search(val) def min(self, node: BSTNode): return node.min() def delete(self, val: int) -> None: pass ``` #### Traversals - Breadth-first - Depth-first - Inorder: Node visited in order (l->n->r) - Preorder: Node visited before children (n->l->r) - Postorder: Node visited after children (l->r->n) ``` from collections import deque def breadth_first_traversal(root): if root is None: return [] results = [] q = deque() q.append(root) while len(q) > 0: node = q.popleft() results.append(node.val) # Put children into the queue if node.left: q.append(node.left) if node.right: q.append(node.right) return results ``` ### Longest substring without repeating characters On [LeetCode](https://leetcode.com/problems/longest-substring-without-repeating-characters/) I believe I have a good method for solving this one now: using a queue as a way to set up a sliding window. I can iterate through the string, adding each character to the queue. If the character matches the character at the front of the queue, dequeue the char off the front. Keep track of the max length of the queue and return it at the end. ``` from collections import deque class Solution: def lengthOfLongestSubstring(self, s: str) -> int: max = 0 # Keep track of max queue length q = deque() # Use queue as sliding window for char in s: # Iterate through string # If char being added matches that at front of queue, dequeue it first if len(q) > 0: if char in q: # Find index of char; dequeue that many elements ix = q.index(char) for i in range(ix + 1): q.popleft() q.append(char) # Add char to queue # Compare length of queue to max, setting max accordingly if len(q) > max: max = len(q) print(q) return max s = "abcabcbb" sol = Solution() sol.lengthOfLongestSubstring(s) d = deque(s) for i in range(d.index("b") + 1): d.popleft() d ```
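As a possible refinement of the deque-based sliding window above (not part of the original notes), here is a minimal sketch that tracks the last-seen index of each character in a dict, which avoids the O(n) `q.index(char)` scan on every step while keeping the same sliding-window idea.

```
def length_of_longest_substring(s: str) -> int:
    """Sliding window over s; last_seen maps each char to its most recent index."""
    last_seen = {}
    longest = 0
    window_start = 0
    for i, char in enumerate(s):
        # If char was seen inside the current window, move the window start past it
        if char in last_seen and last_seen[char] >= window_start:
            window_start = last_seen[char] + 1
        last_seen[char] = i
        longest = max(longest, i - window_start + 1)
    return longest

print(length_of_longest_substring("abcabcbb"))  # expected 3 ("abc")
print(length_of_longest_substring("bbbbb"))     # expected 1 ("b")
```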
# Convolutions

To perform linear convolutions on images, use `image.convolve()`. The only argument to convolve is an `ee.Kernel` which is specified by a shape and the weights in the kernel. Each pixel of the image output by `convolve()` is the linear combination of the kernel values and the input image pixels covered by the kernel. The kernels are applied to each band individually. For example, you might want to use a low-pass (smoothing) kernel to remove high-frequency information. The following illustrates a 15x15 low-pass kernel applied to a Landsat 8 image:

## Install Earth Engine API and geemap

Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.

The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.

**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.foliumap`](https://github.com/giswqs/geemap/blob/master/geemap/foliumap.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).

```
# Installs geemap package
import subprocess

try:
    import geemap
except ImportError:
    print('geemap package not installed.
Installing ...') subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap']) # Checks whether this notebook is running on Google Colab try: import google.colab import geemap.foliumap as emap except: import geemap as emap # Authenticates and initializes Earth Engine import ee try: ee.Initialize() except Exception as e: ee.Authenticate() ee.Initialize() ``` ## Create an interactive map The default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py#L13) can be added using the `Map.add_basemap()` function. ``` Map = emap.Map(center=[40, -100], zoom=4) Map.add_basemap('ROADMAP') # Add Google Map Map ``` ## Add Earth Engine Python script ``` # Load and display an image. image = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140318') Map.setCenter(-121.9785, 37.8694, 11) Map.addLayer(image, {'bands': ['B5', 'B4', 'B3'], 'max': 0.5}, 'input image') # Define a boxcar or low-pass kernel. # boxcar = ee.Kernel.square({ # 'radius': 7, 'units': 'pixels', 'normalize': True # }) boxcar = ee.Kernel.square(7, 'pixels', True) # Smooth the image by convolving with the boxcar kernel. smooth = image.convolve(boxcar) Map.addLayer(smooth, {'bands': ['B5', 'B4', 'B3'], 'max': 0.5}, 'smoothed') Map.addLayerControl() Map ``` The output of convolution with the low-pass filter should look something like Figure 1. Observe that the arguments to the kernel determine its size and coefficients. Specifically, with the `units` parameter set to pixels, the `radius` parameter specifies the number of pixels from the center that the kernel will cover. If `normalize` is set to true, the kernel coefficients will sum to one. If the `magnitude` parameter is set, the kernel coefficients will be multiplied by the magnitude (if `normalize` is also true, the coefficients will sum to `magnitude`). If there is a negative value in any of the kernel coefficients, setting `normalize` to true will make the coefficients sum to zero. Use other kernels to achieve the desired image processing effect. This example uses a Laplacian kernel for isotropic edge detection: ``` Map = emap.Map(center=[40, -100], zoom=4) # Define a Laplacian, or edge-detection kernel. laplacian = ee.Kernel.laplacian8(1, False) # Apply the edge-detection kernel. edgy = image.convolve(laplacian) Map.addLayer(edgy, {'bands': ['B5', 'B4', 'B3'], 'max': 0.5}, 'edges') Map.setCenter(-121.9785, 37.8694, 11) Map.addLayerControl() Map ``` Note the format specifier in the visualization parameters. Earth Engine sends display tiles to the Code Editor in JPEG format for efficiency, however edge tiles are sent in PNG format to handle transparency of pixels outside the image boundary. When a visual discontinuity results, setting the format to PNG results in a consistent display. The result of convolving with the Laplacian edge detection kernel should look something like Figure 2. There are also anisotropic edge detection kernels (e.g. Sobel, Prewitt, Roberts), the direction of which can be changed with `kernel.rotate()`. Other low pass kernels include a Gaussian kernel and kernels of various shape with uniform weights. To create kernels with arbitrarily defined weights and shape, use `ee.Kernel.fixed()`. For example, this code creates a 9x9 kernel of 1’s with a zero in the middle: ``` # Create a list of weights for a 9x9 kernel. list = [1, 1, 1, 1, 1, 1, 1, 1, 1] # The center of the kernel is zero. centerList = [1, 1, 1, 1, 0, 1, 1, 1, 1] # Assemble a list of lists: the 9x9 kernel weights as a 2-D matrix. 
lists = [list, list, list, list, centerList, list, list, list, list] # Create the kernel from the weights. kernel = ee.Kernel.fixed(9, 9, lists, -4, -4, False) print(kernel.getInfo()) ```
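The text above mentions Gaussian and other low-pass kernels. As a small illustrative sketch (not part of the original tutorial), the snippet below smooths the same Landsat image with a Gaussian kernel; it assumes the `image` and `Map` objects defined earlier in this notebook and the standard `ee.Kernel.gaussian()` constructor with radius and sigma given in pixels.

```
# Define a Gaussian low-pass kernel (radius and sigma in pixels).
gaussian = ee.Kernel.gaussian(radius=7, sigma=3, units='pixels', normalize=True)

# Smooth the image by convolving with the Gaussian kernel.
gaussian_smooth = image.convolve(gaussian)

Map.addLayer(gaussian_smooth,
             {'bands': ['B5', 'B4', 'B3'], 'max': 0.5},
             'gaussian smoothed')
Map.addLayerControl()
Map
```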
<h1 align="center">Theano</h1>

```
!pip install numpy matplotlib
!pip install --upgrade https://github.com/Theano/Theano/archive/master.zip
!pip install --upgrade https://github.com/Lasagne/Lasagne/archive/master.zip
```

### Warm-up

```
import theano
import theano.tensor as T

%pylab inline
```

#### the future function parameter -- a symbolic variable

```
N = T.scalar('a dimension', dtype='float32')
```

#### the recipe for squaring -- operations on symbolic variables

```
result = T.power(N, 2)
```

#### theano.grad(cost, wrt)

```
grad_result = theano.grad(result, N)
```

#### compiling the "squaring" function

```
sq_function = theano.function(inputs=[N], outputs=result)
gr_function = theano.function(inputs=[N], outputs=grad_result)
```

#### applying the function

```
# Create an np.array x
xv = np.arange(-10, 10)

# Apply the function to each x
val = map(float, [sq_function(x) for x in xv])

# Compute the gradient at each point
grad = map(float, [gr_function(x) for x in xv])
```

### What will we see if we plot the function and its gradient?

```
pylab.plot(xv, val, label='x*x')
pylab.plot(xv, grad, label='d x*x / dx')
pylab.legend()
```

<h1 align="center">Lasagne</h1>

* lasagne is a library for building neural networks of arbitrary architecture on top of theano
* As a demo task we take the same digit recognition problem, but at a larger scale: 28x28 images, 10 digits

```
from mnist import load_dataset

X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()

print 'X shape', X_train.shape, 'y shape', y_train.shape

fig, axes = plt.subplots(nrows=1, ncols=7, figsize=(20, 20))
for i, ax in enumerate(axes):
    ax.imshow(X_train[i, 0], cmap='gray')
```

Let's take a look at DenseLayer in lasagne
- http://lasagne.readthedocs.io/en/latest/modules/layers/dense.html
- https://github.com/Lasagne/Lasagne/blob/master/lasagne/layers/dense.py#L16-L124
- All the essential code is here https://github.com/Lasagne/Lasagne/blob/master/lasagne/layers/dense.py#L121

```
import lasagne
from lasagne import init
from theano import tensor as T
from lasagne.nonlinearities import softmax

X, y = T.tensor4('X'), T.vector('y', 'int32')
```

This is how the network architecture is defined

```
# input layer (auxiliary)
net = lasagne.layers.InputLayer(shape=(None, 1, 28, 28), input_var=X)

net = lasagne.layers.Conv2DLayer(net, 15, 28, pad='valid', W=init.Constant())  # convolutional layer
net = lasagne.layers.Conv2DLayer(net, 10, 2, pad='full', W=init.Constant())  # convolutional layer
net = lasagne.layers.DenseLayer(net, num_units=500)  # fully connected layer
net = lasagne.layers.DropoutLayer(net, 1.0)  # regularizer
net = lasagne.layers.DenseLayer(net, num_units=200)  # fully connected layer
net = lasagne.layers.DenseLayer(net, num_units=10)  # fully connected layer

# network prediction (a theano expression)
y_predicted = lasagne.layers.get_output(net)

# all of the network's weights (shared variables)
all_weights = lasagne.layers.get_all_params(net)
print all_weights

# the loss function and the accuracy are defined right here
loss = lasagne.objectives.categorical_accuracy(y_predicted, y).mean()
accuracy = lasagne.objectives.categorical_accuracy(y_predicted, y).mean()

# immediately compute the dictionary of updated values with a gradient step, as before
updates = lasagne.updates.momentum(loss, all_weights, learning_rate=1.0, momentum=1.5)

# function that applies the updates and returns the loss and accuracy values
train_fun = theano.function([X, y], [loss, accuracy], updates=updates)
accuracy_fun = theano.function([X, y], accuracy)  # accuracy without updating the weights, for testing
```

# Training process

```
import time
from mnist import iterate_minibatches

num_epochs = 5  # number of passes over the data
batch_size = 50  # mini-batch size

for epoch in range(num_epochs):
    train_err, train_acc, train_batches, start_time = 0, 0, 0, time.time()
    for inputs, targets in iterate_minibatches(X_train, y_train, batch_size):
        train_err_batch, train_acc_batch = train_fun(inputs, targets)
        train_err += train_err_batch
        train_acc += train_acc_batch
        train_batches += 1

    val_acc, val_batches = 0, 0
    for inputs, targets in iterate_minibatches(X_test, y_test, batch_size):
        val_acc += accuracy_fun(inputs, targets)
        val_batches += 1

    print "Epoch %s of %s took %.3f s" % (epoch + 1, num_epochs, time.time() - start_time)
    print " train loss:\t %.3f" % (train_err / train_batches)
    print " train acc:\t %.3f" % (train_acc * 100 / train_batches), '%'
    print " test acc:\t %.3f" % (val_acc * 100 / val_batches), '%'
    print

test_acc = 0
test_batches = 0
for batch in iterate_minibatches(X_test, y_test, 500):
    inputs, targets = batch
    acc = accuracy_fun(inputs, targets)
    test_acc += acc
    test_batches += 1

print("Final results: \n test accuracy:\t\t{:.2f} %".format(test_acc / test_batches * 100))
```

# Ensembling with DropOut

```
# network prediction (a theano expression): average 10 stochastic forward passes
y_predicted = T.mean([lasagne.layers.get_output(net, deterministic=False) for i in range(10)], axis=0)

accuracy = lasagne.objectives.categorical_accuracy(y_predicted, y).mean()
accuracy_fun = theano.function([X, y], accuracy)  # accuracy without updating the weights, for testing

test_acc = 0
test_batches = 0
for batch in iterate_minibatches(X_test, y_test, 500):
    inputs, targets = batch
    acc = accuracy_fun(inputs, targets)
    test_acc += acc
    test_batches += 1

print("Final results: \n test accuracy:\t\t{:.2f} %".format(test_acc / test_batches * 100))
```
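For comparison with the stochastic averaging above, here is a minimal sketch (not in the original notebook) of a deterministic prediction function, which switches dropout off at test time via `deterministic=True`; it reuses the `net`, `X`, `y`, and `iterate_minibatches` names defined earlier.

```
# Deterministic prediction: dropout layers are disabled
y_deterministic = lasagne.layers.get_output(net, deterministic=True)
det_accuracy = lasagne.objectives.categorical_accuracy(y_deterministic, y).mean()
det_accuracy_fun = theano.function([X, y], det_accuracy)

test_acc, test_batches = 0, 0
for inputs, targets in iterate_minibatches(X_test, y_test, 500):
    test_acc += det_accuracy_fun(inputs, targets)
    test_batches += 1

print("Deterministic test accuracy:\t{:.2f} %".format(test_acc / test_batches * 100))
```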
查看当前GPU信息 ``` from tensorflow.python.client import device_lib device_lib.list_local_devices() !pip install bert-tensorflow import pandas as pd import tensorflow as tf import tensorflow_hub as hub import pickle import bert from bert import run_classifier from bert import optimization from bert import tokenization def pretty_print(result): df = pd.DataFrame([result]).T df.columns = ["values"] return df def create_tokenizer_from_hub_module(bert_model_hub): """Get the vocab file and casing info from the Hub module.""" with tf.Graph().as_default(): bert_module = hub.Module(bert_model_hub) tokenization_info = bert_module(signature="tokenization_info", as_dict=True) with tf.Session() as sess: vocab_file, do_lower_case = sess.run([tokenization_info["vocab_file"], tokenization_info["do_lower_case"]]) return bert.tokenization.FullTokenizer( vocab_file=vocab_file, do_lower_case=do_lower_case) def make_features(dataset, label_list, MAX_SEQ_LENGTH, tokenizer, DATA_COLUMN, LABEL_COLUMN): input_example = dataset.apply(lambda x: bert.run_classifier.InputExample(guid=None, text_a = x[DATA_COLUMN], text_b = None, label = x[LABEL_COLUMN]), axis = 1) features = bert.run_classifier.convert_examples_to_features(input_example, label_list, MAX_SEQ_LENGTH, tokenizer) return features def create_model(bert_model_hub, is_predicting, input_ids, input_mask, segment_ids, labels, num_labels): """Creates a classification model.""" bert_module = hub.Module( bert_model_hub, trainable=True) bert_inputs = dict( input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids) bert_outputs = bert_module( inputs=bert_inputs, signature="tokens", as_dict=True) # Use "pooled_output" for classification tasks on an entire sentence. # Use "sequence_outputs" for token-level output. output_layer = bert_outputs["pooled_output"] hidden_size = output_layer.shape[-1].value # Create our own layer to tune for politeness data. output_weights = tf.get_variable( "output_weights", [num_labels, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02)) output_bias = tf.get_variable( "output_bias", [num_labels], initializer=tf.zeros_initializer()) with tf.variable_scope("loss"): # Dropout helps prevent overfitting output_layer = tf.nn.dropout(output_layer, keep_prob=0.9) logits = tf.matmul(output_layer, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) log_probs = tf.nn.log_softmax(logits, axis=-1) # Convert labels into one-hot encoding one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32) predicted_labels = tf.squeeze(tf.argmax(log_probs, axis=-1, output_type=tf.int32)) # If we're predicting, we want predicted labels and the probabiltiies. if is_predicting: return (predicted_labels, log_probs) # If we're train/eval, compute loss between predicted and actual label per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = tf.reduce_mean(per_example_loss) return (loss, predicted_labels, log_probs) # model_fn_builder actually creates our model function # using the passed parameters for num_labels, learning_rate, etc. 
def model_fn_builder(bert_model_hub, num_labels, learning_rate, num_train_steps, num_warmup_steps): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] label_ids = features["label_ids"] is_predicting = (mode == tf.estimator.ModeKeys.PREDICT) # TRAIN and EVAL if not is_predicting: (loss, predicted_labels, log_probs) = create_model( bert_model_hub, is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels) train_op = bert.optimization.create_optimizer( loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False) # Calculate evaluation metrics. def metric_fn(label_ids, predicted_labels): accuracy = tf.metrics.accuracy(label_ids, predicted_labels) f1_score = tf.contrib.metrics.f1_score( label_ids, predicted_labels) auc = tf.metrics.auc( label_ids, predicted_labels) recall = tf.metrics.recall( label_ids, predicted_labels) precision = tf.metrics.precision( label_ids, predicted_labels) true_pos = tf.metrics.true_positives( label_ids, predicted_labels) true_neg = tf.metrics.true_negatives( label_ids, predicted_labels) false_pos = tf.metrics.false_positives( label_ids, predicted_labels) false_neg = tf.metrics.false_negatives( label_ids, predicted_labels) return { "eval_accuracy": accuracy, "f1_score": f1_score, "auc": auc, "precision": precision, "recall": recall, "true_positives": true_pos, "true_negatives": true_neg, "false_positives": false_pos, "false_negatives": false_neg } eval_metrics = metric_fn(label_ids, predicted_labels) if mode == tf.estimator.ModeKeys.TRAIN: return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op) else: return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metrics) else: (predicted_labels, log_probs) = create_model( bert_model_hub, is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels) predictions = { 'probabilities': log_probs, 'labels': predicted_labels } return tf.estimator.EstimatorSpec(mode, predictions=predictions) # Return the actual model function in the closure return model_fn def estimator_builder(bert_model_hub, OUTPUT_DIR, SAVE_SUMMARY_STEPS, SAVE_CHECKPOINTS_STEPS, label_list, LEARNING_RATE, num_train_steps, num_warmup_steps, BATCH_SIZE): # Specify outpit directory and number of checkpoint steps to save run_config = tf.estimator.RunConfig( model_dir=OUTPUT_DIR, save_summary_steps=SAVE_SUMMARY_STEPS, save_checkpoints_steps=SAVE_CHECKPOINTS_STEPS) model_fn = model_fn_builder( bert_model_hub = bert_model_hub, num_labels=len(label_list), learning_rate=LEARNING_RATE, num_train_steps=num_train_steps, num_warmup_steps=num_warmup_steps) estimator = tf.estimator.Estimator( model_fn=model_fn, config=run_config, params={"batch_size": BATCH_SIZE}) return estimator, model_fn, run_config def run_on_dfs(train, test, DATA_COLUMN, LABEL_COLUMN, MAX_SEQ_LENGTH = 128, BATCH_SIZE = 32, LEARNING_RATE = 2e-5, NUM_TRAIN_EPOCHS = 3.0, WARMUP_PROPORTION = 0.1, SAVE_SUMMARY_STEPS = 100, SAVE_CHECKPOINTS_STEPS = 10000, bert_model_hub = "https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1"): label_list = train[LABEL_COLUMN].unique().tolist() tokenizer = create_tokenizer_from_hub_module(bert_model_hub) train_features = make_features(train, label_list, MAX_SEQ_LENGTH, tokenizer, DATA_COLUMN, LABEL_COLUMN) test_features = make_features(test, label_list, MAX_SEQ_LENGTH, 
                                  tokenizer, DATA_COLUMN, LABEL_COLUMN)

    num_train_steps = int(len(train_features) / BATCH_SIZE * NUM_TRAIN_EPOCHS)
    num_warmup_steps = int(num_train_steps * WARMUP_PROPORTION)

    estimator, model_fn, run_config = estimator_builder(
        bert_model_hub,
        OUTPUT_DIR,
        SAVE_SUMMARY_STEPS,
        SAVE_CHECKPOINTS_STEPS,
        label_list,
        LEARNING_RATE,
        num_train_steps,
        num_warmup_steps,
        BATCH_SIZE)

    train_input_fn = bert.run_classifier.input_fn_builder(
        features=train_features,
        seq_length=MAX_SEQ_LENGTH,
        is_training=True,
        drop_remainder=False)

    estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)

    test_input_fn = run_classifier.input_fn_builder(
        features=test_features,
        seq_length=MAX_SEQ_LENGTH,
        is_training=False,
        drop_remainder=False)

    result_dict = estimator.evaluate(input_fn=test_input_fn, steps=None)
    return result_dict, estimator

import random
random.seed(10)

OUTPUT_DIR = 'output'
```

----- Only the code below needs to be changed ------

Import the dataset

```
!wget https://github.com/yaoyue123/SocialComputing/raw/master/spam_message/training.txt
!wget https://github.com/yaoyue123/SocialComputing/raw/master/spam_message/validation.txt

train = pd.read_table("training.txt",sep='\t',error_bad_lines=False)
#mytrain= mytrain[order]
test = pd.read_table("validation.txt",sep='\t',error_bad_lines=False)
#mytest= mytest[order]

train.head()

test.head()
```

Change your parameters here, such as the label column, the BERT model hub address, and the number of epochs

```
myparam = {
        "DATA_COLUMN": "massage",
        "LABEL_COLUMN": "label",
        "LEARNING_RATE": 2e-5,
        "NUM_TRAIN_EPOCHS":1,
        "bert_model_hub":"https://tfhub.dev/google/bert_chinese_L-12_H-768_A-12/1"
    }
```

Train the model. Normally, one epoch takes roughly 10 minutes on a K80.

```
result, estimator = run_on_dfs(train, test, **myparam)
```

The BERT model is quite strong: a single epoch already reaches 99% accuracy.

```
pretty_print(result)
```
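As a possible next step (not part of the original notebook), here is a hedged sketch of classifying new messages with the trained `estimator`. It assumes the tokenizer and label list are rebuilt exactly the way `run_on_dfs` builds them (same label order as during training), and it only reuses the `bert.run_classifier` helpers already imported above; `predict_messages` itself is a hypothetical helper name.

```
def predict_messages(estimator, messages, label_list, bert_model_hub, max_seq_length=128):
    tokenizer = create_tokenizer_from_hub_module(bert_model_hub)
    # Wrap raw strings as InputExamples; the label is only a placeholder required by the API
    examples = [bert.run_classifier.InputExample(guid=None, text_a=text, text_b=None,
                                                 label=label_list[0])
                for text in messages]
    features = bert.run_classifier.convert_examples_to_features(
        examples, label_list, max_seq_length, tokenizer)
    predict_input_fn = bert.run_classifier.input_fn_builder(
        features=features, seq_length=max_seq_length,
        is_training=False, drop_remainder=False)
    # model_fn returns {'probabilities': ..., 'labels': ...} in predict mode
    return [label_list[p['labels']] for p in estimator.predict(input_fn=predict_input_fn)]

# label_list must match the order used during training (as in run_on_dfs)
label_list = train["label"].unique().tolist()
print(predict_messages(estimator, ["your sample message here"], label_list,
                       myparam["bert_model_hub"]))
```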
# Anna KaRNNa In this notebook, I'll build a character-wise RNN trained on Anna Karenina, one of my all-time favorite books. It'll be able to generate new text based on the text from the book. This network is based off of Andrej Karpathy's [post on RNNs](http://karpathy.github.io/2015/05/21/rnn-effectiveness/) and [implementation in Torch](https://github.com/karpathy/char-rnn). Also, some information [here at r2rt](http://r2rt.com/recurrent-neural-networks-in-tensorflow-ii.html) and from [Sherjil Ozair](https://github.com/sherjilozair/char-rnn-tensorflow) on GitHub. Below is the general architecture of the character-wise RNN. <img src="assets/charseq.jpeg" width="500"> ``` import time from collections import namedtuple import numpy as np import tensorflow as tf ``` First we'll load the text file and convert it into integers for our network to use. ``` with open('anna.txt', 'r') as f: text=f.read() vocab = set(text) vocab_to_int = {c: i for i, c in enumerate(vocab)} int_to_vocab = dict(enumerate(vocab)) chars = np.array([vocab_to_int[c] for c in text], dtype=np.int32) text[:100] chars[:100] ``` Now I need to split up the data into batches, and into training and validation sets. I should be making a test set here, but I'm not going to worry about that. My test will be if the network can generate new text. Here I'll make both input and target arrays. The targets are the same as the inputs, except shifted one character over. I'll also drop the last bit of data so that I'll only have completely full batches. The idea here is to make a 2D matrix where the number of rows is equal to the number of batches. Each row will be one long concatenated string from the character data. We'll split this data into a training set and validation set using the `split_frac` keyword. This will keep 90% of the batches in the training set, the other 10% in the validation set. ``` def split_data(chars, batch_size, num_steps, split_frac=0.9): """ Split character data into training and validation sets, inputs and targets for each set. Arguments --------- chars: character array batch_size: Size of examples in each of batch num_steps: Number of sequence steps to keep in the input and pass to the network split_frac: Fraction of batches to keep in the training set Returns train_x, train_y, val_x, val_y """ slice_size = batch_size * num_steps n_batches = int(len(chars) / slice_size) # Drop the last few characters to make only full batches x = chars[: n_batches*slice_size] y = chars[1: n_batches*slice_size + 1] # Split the data into batch_size slices, then stack them into a 2D matrix x = np.stack(np.split(x, batch_size)) y = np.stack(np.split(y, batch_size)) # Now x and y are arrays with dimensions batch_size x n_batches*num_steps # Split into training and validation sets, keep the virst split_frac batches for training split_idx = int(n_batches*split_frac) train_x, train_y= x[:, :split_idx*num_steps], y[:, :split_idx*num_steps] val_x, val_y = x[:, split_idx*num_steps:], y[:, split_idx*num_steps:] return train_x, train_y, val_x, val_y train_x, train_y, val_x, val_y = split_data(chars, 10, 200) train_x.shape train_x[:,:10] ``` I'll write another function to grab batches out of the arrays made by split data. Here each batch will be a sliding window on these arrays with size `batch_size X num_steps`. For example, if we want our network to train on a sequence of 100 characters, `num_steps = 100`. For the next batch, we'll shift this window the next sequence of `num_steps` characters. 
In this way we can feed batches to the network and the cell states will continue through on each batch. ``` def get_batch(arrs, num_steps): batch_size, slice_size = arrs[0].shape n_batches = int(slice_size/num_steps) for b in range(n_batches): yield [x[:, b*num_steps: (b+1)*num_steps] for x in arrs] def build_rnn(num_classes, batch_size=50, num_steps=50, lstm_size=128, num_layers=2, learning_rate=0.001, grad_clip=5, sampling=False): if sampling == True: batch_size, num_steps = 1, 1 tf.reset_default_graph() # Declare placeholders we'll feed into the graph inputs = tf.placeholder(tf.int32, [batch_size, num_steps], name='inputs') x_one_hot = tf.one_hot(inputs, num_classes, name='x_one_hot') targets = tf.placeholder(tf.int32, [batch_size, num_steps], name='targets') y_one_hot = tf.one_hot(targets, num_classes, name='y_one_hot') y_reshaped = tf.reshape(y_one_hot, [-1, num_classes]) keep_prob = tf.placeholder(tf.float32, name='keep_prob') # Build the RNN layers lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size) drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob) cell = tf.contrib.rnn.MultiRNNCell([drop] * num_layers) initial_state = cell.zero_state(batch_size, tf.float32) # Run the data through the RNN layers outputs, state = tf.nn.dynamic_rnn(cell, x_one_hot, initial_state=initial_state) final_state = state # Reshape output so it's a bunch of rows, one row for each cell output seq_output = tf.concat(outputs, axis=1,name='seq_output') output = tf.reshape(seq_output, [-1, lstm_size], name='graph_output') # Now connect the RNN putputs to a softmax layer and calculate the cost softmax_w = tf.Variable(tf.truncated_normal((lstm_size, num_classes), stddev=0.1), name='softmax_w') softmax_b = tf.Variable(tf.zeros(num_classes), name='softmax_b') logits = tf.matmul(output, softmax_w) + softmax_b preds = tf.nn.softmax(logits, name='predictions') loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped, name='loss') cost = tf.reduce_mean(loss, name='cost') # Optimizer for training, using gradient clipping to control exploding gradients tvars = tf.trainable_variables() grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), grad_clip) train_op = tf.train.AdamOptimizer(learning_rate) optimizer = train_op.apply_gradients(zip(grads, tvars)) # Export the nodes export_nodes = ['inputs', 'targets', 'initial_state', 'final_state', 'keep_prob', 'cost', 'preds', 'optimizer'] Graph = namedtuple('Graph', export_nodes) local_dict = locals() graph = Graph(*[local_dict[each] for each in export_nodes]) return graph ``` ## Hyperparameters Here I'm defining the hyperparameters for the network. The two you probably haven't seen before are `lstm_size` and `num_layers`. These set the number of hidden units in the LSTM layers and the number of LSTM layers, respectively. Of course, making these bigger will improve the network's performance but you'll have to watch out for overfitting. If your validation loss is much larger than the training loss, you're probably overfitting. Decrease the size of the network or decrease the dropout keep probability. 
``` batch_size = 100 num_steps = 100 lstm_size = 512 num_layers = 2 learning_rate = 0.001 ``` ## Write out the graph for TensorBoard ``` model = build_rnn(len(vocab), batch_size=batch_size, num_steps=num_steps, learning_rate=learning_rate, lstm_size=lstm_size, num_layers=num_layers) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) file_writer = tf.summary.FileWriter('./logs/1', sess.graph) ``` ## Training Time for training which is is pretty straightforward. Here I pass in some data, and get an LSTM state back. Then I pass that state back in to the network so the next batch can continue the state from the previous batch. And every so often (set by `save_every_n`) I calculate the validation loss and save a checkpoint. ``` !mkdir -p checkpoints/anna epochs = 1 save_every_n = 200 train_x, train_y, val_x, val_y = split_data(chars, batch_size, num_steps) model = build_rnn(len(vocab), batch_size=batch_size, num_steps=num_steps, learning_rate=learning_rate, lstm_size=lstm_size, num_layers=num_layers) saver = tf.train.Saver(max_to_keep=100) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) # Use the line below to load a checkpoint and resume training #saver.restore(sess, 'checkpoints/anna20.ckpt') n_batches = int(train_x.shape[1]/num_steps) iterations = n_batches * epochs for e in range(epochs): # Train network new_state = sess.run(model.initial_state) loss = 0 for b, (x, y) in enumerate(get_batch([train_x, train_y], num_steps), 1): iteration = e*n_batches + b start = time.time() feed = {model.inputs: x, model.targets: y, model.keep_prob: 0.5, model.initial_state: new_state} batch_loss, new_state, _ = sess.run([model.cost, model.final_state, model.optimizer], feed_dict=feed) loss += batch_loss end = time.time() print('Epoch {}/{} '.format(e+1, epochs), 'Iteration {}/{}'.format(iteration, iterations), 'Training loss: {:.4f}'.format(loss/b), '{:.4f} sec/batch'.format((end-start))) if (iteration%save_every_n == 0) or (iteration == iterations): # Check performance, notice dropout has been set to 1 val_loss = [] new_state = sess.run(model.initial_state) for x, y in get_batch([val_x, val_y], num_steps): feed = {model.inputs: x, model.targets: y, model.keep_prob: 1., model.initial_state: new_state} batch_loss, new_state = sess.run([model.cost, model.final_state], feed_dict=feed) val_loss.append(batch_loss) print('Validation loss:', np.mean(val_loss), 'Saving checkpoint!') saver.save(sess, "checkpoints/anna/i{}_l{}_{:.3f}.ckpt".format(iteration, lstm_size, np.mean(val_loss))) tf.train.get_checkpoint_state('checkpoints/anna') ``` ## Sampling Now that the network is trained, we'll can use it to generate new text. The idea is that we pass in a character, then the network will predict the next character. We can use the new one, to predict the next one. And we keep doing this to generate all new text. I also included some functionality to prime the network with some text by passing in a string and building up a state from that. The network gives us predictions for each character. To reduce noise and make things a little less random, I'm going to only choose a new character from the top N most likely characters. 
``` def pick_top_n(preds, vocab_size, top_n=5): p = np.squeeze(preds) p[np.argsort(p)[:-top_n]] = 0 p = p / np.sum(p) c = np.random.choice(vocab_size, 1, p=p)[0] return c def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The "): prime = "Far" samples = [c for c in prime] model = build_rnn(vocab_size, lstm_size=lstm_size, sampling=True) saver = tf.train.Saver() with tf.Session() as sess: saver.restore(sess, checkpoint) new_state = sess.run(model.initial_state) for c in prime: x = np.zeros((1, 1)) x[0,0] = vocab_to_int[c] feed = {model.inputs: x, model.keep_prob: 1., model.initial_state: new_state} preds, new_state = sess.run([model.preds, model.final_state], feed_dict=feed) c = pick_top_n(preds, len(vocab)) samples.append(int_to_vocab[c]) for i in range(n_samples): x[0,0] = c feed = {model.inputs: x, model.keep_prob: 1., model.initial_state: new_state} preds, new_state = sess.run([model.preds, model.final_state], feed_dict=feed) c = pick_top_n(preds, len(vocab)) samples.append(int_to_vocab[c]) return ''.join(samples) checkpoint = "checkpoints/anna/i3560_l512_1.122.ckpt" samp = sample(checkpoint, 2000, lstm_size, len(vocab), prime="Far") print(samp) checkpoint = "checkpoints/anna/i200_l512_2.432.ckpt" samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far") print(samp) checkpoint = "checkpoints/anna/i600_l512_1.750.ckpt" samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far") print(samp) checkpoint = "checkpoints/anna/i1000_l512_1.484.ckpt" samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far") print(samp) ```
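An alternative to `pick_top_n` (not in the original notebook) is temperature sampling: rescale the predicted distribution by a temperature before re-normalizing, so low temperatures concentrate probability on the most likely characters and high temperatures increase diversity. A minimal sketch, assuming `preds` is the softmax output produced by the network above:

```
def pick_with_temperature(preds, vocab_size, temperature=0.5):
    """Sample a character index after rescaling predictions by a temperature."""
    p = np.squeeze(preds).astype(np.float64)
    # Work in log space, rescale, then re-normalize with a softmax
    logp = np.log(p + 1e-10) / temperature
    p = np.exp(logp - np.max(logp))
    p = p / np.sum(p)
    return np.random.choice(vocab_size, 1, p=p)[0]
```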
## Change sys.path to use my tensortrade instead of the one in env ``` import sys sys.path.append("/Users/jasonfiacco/Documents/Yale/Senior/thesis/deeptrader") print(sys.path) ``` ## Read PredictIt Data Instead ``` import ssl import pandas as pd ssl._create_default_https_context = ssl._create_unverified_context # Only used if pandas gives a SSLError def fetch_data(symbol): path = "/Users/jasonfiacco/Documents/Yale/Senior/thesis/predictit_datasets/" filename = "{}.xlsx".format(symbol) df = pd.read_excel(path + filename, skiprows=4) df = df.set_index("Date") df = df.drop(df.columns[[7,8,9]], axis=1) df = df.drop("ID", 1) df.columns = [symbol + ":" + name.lower() for name in df.columns] return df all_data = pd.concat([ fetch_data("WARREN"), fetch_data("CRUZ"), fetch_data("MANCHIN"), fetch_data("SANDERS"), fetch_data("NELSON"), fetch_data("DONNELLY"), fetch_data("PELOSI"), fetch_data("MANAFORT"), fetch_data("BROWN"), fetch_data("RYAN"), fetch_data("STABENOW") ], axis=1) all_data.head() ``` ## Plot the closing prices for all the markets ``` %matplotlib inline closing_prices = all_data.loc[:, [("close" in name) for name in all_data.columns]] closing_prices.plot() ``` ## Slice just a specific time period from the dataframe ``` all_data.index = pd.to_datetime(all_data.index) subset_data = all_data[(all_data.index >= '09-01-2017') & (all_data.index <= '09-04-2019')] subset_data.head() ``` ## Define Exchanges An exchange needs a name, an execution service, and streams of price data in order to function properly. The setups supported right now are the simulated execution service using simulated or stochastic data. More execution services will be made available in the future, as well as price streams so that live data and execution can be supported. ``` from tensortrade.exchanges import Exchange from tensortrade.exchanges.services.execution.simulated import execute_order from tensortrade.data import Stream #Exchange(name of exchange, service) #It looks like each Stream takes a name, and then a list of the closing prices. predictit_exch = Exchange("predictit", service=execute_order)( Stream("USD-WARREN", list(subset_data['WARREN:close'])), Stream("USD-CRUZ", list(subset_data['CRUZ:close'])), Stream("USD-MANCHIN", list(subset_data['MANCHIN:close'])), Stream("USD-SANDERS", list(subset_data['SANDERS:close'])), Stream("USD-NELSON", list(subset_data['NELSON:close'])), Stream("USD-DONNELLY", list(subset_data['DONNELLY:close'])), Stream("USD-PELOSI", list(subset_data['PELOSI:close'])), Stream("USD-MANAFORT", list(subset_data['MANAFORT:close'])), Stream("USD-BROWN", list(subset_data['BROWN:close'])), Stream("USD-RYAN", list(subset_data['RYAN:close'])), Stream("USD-STABENOW", list(subset_data['STABENOW:close'])) ) ``` Now that the exchanges have been defined we can define our features that we would like to include, excluding the prices we have provided for the exchanges. ### Doing it without adding other features. Just use price ``` #You still have to add "Streams" for all the standard columns open, high, low, close, volume in this case from tensortrade.data import DataFeed, Module with Module("predictit") as predictit_ns: predictit_nodes = [Stream(name, list(subset_data[name])) for name in subset_data.columns] #Then create the Feed from it feed = DataFeed([predictit_ns]) feed.next() ``` ## Portfolio Make the portfolio using the any combinations of exchanges and intruments that the exchange supports ``` #I am going to have to add "instruments" for all 25 of the PredictIt markets I'm working with. 
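# Editor's sketch (assumption, not from the original notebook): each ticker imported below
# must first exist as an Instrument inside tensortrade.instruments. Following the
# Instrument(symbol, precision, description) pattern printed in the "Extra Stuff" section
# at the end of this notebook, a definition might look like:
#   WARREN = Instrument('WARREN', 2, 'Warren PredictIt market')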
from tensortrade.instruments import USD, WARREN, CRUZ, MANCHIN, SANDERS, NELSON, DONNELLY,\ PELOSI, MANAFORT, BROWN, RYAN, STABENOW from tensortrade.wallets import Wallet, Portfolio portfolio = Portfolio(USD, [ Wallet(predictit_exch, 10000 * USD), Wallet(predictit_exch, 0 * WARREN), Wallet(predictit_exch, 0 * CRUZ), Wallet(predictit_exch, 0 * MANCHIN), Wallet(predictit_exch, 0 * SANDERS), Wallet(predictit_exch, 0 * NELSON), Wallet(predictit_exch, 0 * DONNELLY), Wallet(predictit_exch, 0 * PELOSI), Wallet(predictit_exch, 0 * MANAFORT), Wallet(predictit_exch, 0 * BROWN), Wallet(predictit_exch, 0 * RYAN), Wallet(predictit_exch, 0 * STABENOW) ]) ``` ## Environment ``` from tensortrade.environments import TradingEnvironment env = TradingEnvironment( feed=feed, portfolio=portfolio, action_scheme='simple', reward_scheme='simple', window_size=15, enable_logger=False, renderers = 'screenlog' ) env.feed.next() ``` #### ^An environment doesn't just show the OHLCV for each instrument. It also shows free, locked, total, as well as "USD_BTC" ## Using 123's Ray example ``` import os parent_dir = "/Users/jasonfiacco/Documents/Yale/Senior/thesis/deeptrader" os.environ["PYTHONPATH"] = parent_dir + ":" + os.environ.get("PYTHONPATH", "") !PYTHONWARNINGS=ignore::yaml.YAMLLoadWarning #Import tensortrade import tensortrade # Define Exchanges from tensortrade.exchanges import Exchange from tensortrade.exchanges.services.execution.simulated import execute_order from tensortrade.data import Stream # Define External Data Feed (features) import ta from sklearn import preprocessing from tensortrade.data import DataFeed, Module # Portfolio from tensortrade.instruments import USD, BTC from tensortrade.wallets import Wallet, Portfolio from tensortrade.actions import ManagedRiskOrders from gym.spaces import Discrete # Environment from tensortrade.environments import TradingEnvironment import gym import ray from ray import tune from ray.tune import grid_search from ray.tune.registry import register_env import ray.rllib.agents.ppo as ppo import ray.rllib.agents.dqn as dqn from ray.tune.logger import pretty_print from tensortrade.rewards import RiskAdjustedReturns class RayTradingEnv(TradingEnvironment): def __init__(self): env = TradingEnvironment( feed=feed, portfolio=portfolio, action_scheme="simple", reward_scheme="simple", window_size=15, enable_logger=False, renderers = 'screenlog' ) self.env = env self.action_space = self.env.action_space self.observation_space = self.env.observation_space def reset(self): return self.env.reset() def step(self, action): return self.env.step(action) def env_creator(env_config): return RayTradingEnv() register_env("ray_trading_env", env_creator) ray.init(ignore_reinit_error=True) config = dqn.DEFAULT_CONFIG.copy() config["num_gpus"] = 0 #config["num_workers"] = 4 #config["num_envs_per_worker"] = 8 # config["eager"] = False # config["timesteps_per_iteration"] = 100 # config["train_batch_size"] = 20 #config['log_level'] = "DEBUG" trainer = dqn.DQNTrainer(config=config, env="ray_trading_env") config ``` ## Train using the old fashioned RLLib way ``` for i in range(10): # Perform one iteration of training the policy with PPO print("Training iteration {}...".format(i)) result = trainer.train() print("result: {}".format(result)) if i % 100 == 0: checkpoint = trainer.save() print("checkpoint saved at", checkpoint) result['hist_stats']['episode_reward'] ``` ## OR train using the tune way (better so far) ``` analysis = tune.run( "DQN", name = "DQN10-paralellism", checkpoint_at_end=True, stop={ 
"timesteps_total": 4000, }, config={ "env": "ray_trading_env", "lr": grid_search([1e-4]), # try different lrs "num_workers": 2, # parallelism, }, ) #Use the below command to see results #tensorboard --logdir=/Users/jasonfiacco/ray_results/DQN2 #Now you can plot the reward results of your tuner. dfs = analysis.trial_dataframes ax = None for d in dfs.values(): ax = d.episode_reward_mean.plot(ax=ax, legend=True) ``` ## Restoring an already existing agent that I tuned ``` import os logdir = analysis.get_best_logdir("episode_reward_mean", mode="max") trainer.restore(os.path.join(logdir, "checkpoint_993/checkpoint-993")) trainer.restore("/Users/jasonfiacco/ray_results/DQN4/DQN_ray_trading_env_fedb24f0_0_lr=1e-06_2020-03-03_15-46-02kzbdv53d/checkpoint_5/checkpoint-5") ``` ## Testing ``` #Set up a testing environment with test data. test_env = TradingEnvironment( feed=feed, portfolio=portfolio, action_scheme='simple', reward_scheme='simple', window_size=15, enable_logger=False, renderers = 'screenlog' ) for episode_num in range(1): state = test_env.reset() done = False cumulative_reward = 0 step = 0 action = trainer.compute_action(state) while not done: action = trainer.compute_action(state) state, reward, done, results = test_env.step(action) cumulative_reward += reward #Render every 100 steps: if step % 100 == 0: test_env.render() step += 1 print("Cumulative reward: ", cumulative_reward) ``` ## Plot ``` %matplotlib inline portfolio.performance.plot() portfolio.performance.net_worth.plot() #Plot the total balance in each type of item p = portfolio.performance p2 = p.iloc[:, :] weights = p2.loc[:, [("/worth" in name) for name in p2.columns]] weights.iloc[:, 1:8].plot() ``` ## Try Plotly Render too ``` from tensortrade.environments.render import PlotlyTradingChart from tensortrade.environments.render import FileLogger chart_renderer = PlotlyTradingChart( height = 800 ) file_logger = FileLogger( filename='example.log', # omit or None for automatic file name path='training_logs' # create a new directory if doesn't exist, None for no directory ) price_history.columns = ['datetime', 'open', 'high', 'low', 'close', 'volume'] env = TradingEnvironment( feed=feed, portfolio=portfolio, action_scheme='managed-risk', reward_scheme='risk-adjusted', window_size=20, price_history=price_history, renderers = [chart_renderer, file_logger] ) from tensortrade.agents import DQNAgent agent = DQNAgent(env) agent.train(n_episodes=1, n_steps=1000, render_interval=1) ``` ## Extra Stuff ``` apath = "/Users/jasonfiacco/Documents/Yale/Senior/thesis/jasonfiacco-selectedmarkets-mytickers.xlsx" df = pd.read_excel(apath, skiprows=2) jason_tickers = df.iloc[:, 5].tolist() descriptions = df.iloc[:, 1].tolist() for ticker, description in zip(jason_tickers, descriptions): l = "{} = Instrument(\'{}\', 2, \'{}\')".format(ticker, ticker, description) print(l) ```
## 1. Convert pdf to image ``` ## NOTE: install tesseract (https://github.com/UB-Mannheim/tesseract/wiki) and Poppler first # !pip install pytesseract # !pip install Pillow # !pip install pdf2image # import statements from PIL import Image from pdf2image import convert_from_path import sys import os import numpy as np folder_path = 'C:\\Users\Vanessa\\Downloads\\for_ocr' file_list = os.listdir(folder_path) # remove duplicates from list unique_files = [file for file in file_list if "(1)" not in file] # convert pdf to image in PNG format def pdf_to_imgs(folder_path, file): pages = convert_from_path(f"{folder_path}\\{file}", 500) # counter for image file img_counter = 1 # for each unique page, make a filename and save as png for page in pages: filename = f"{file}_{img_counter}.png".replace('.pdf','') print(f'Saving {filename}') page.save(filename, 'PNG') img_counter += 1 for file in unique_files: pdf_to_imgs(folder_path, file) ``` ## 2. Check file integrity, size ``` folder_path = 'C:\\Users\\Vanessa\\Jupyter Notebooks\\STUFF' file_list = [f for f in os.listdir(folder_path) if f.endswith('.png')] print('Total files to check:', len(file_list)) # getting maximum dimension of each image max_width = 0 max_height = 0 for file in file_list: try: with Image.open(os.path.join(folder_path, file)) as img: width, height = img.size if width > max_width: max_width = width if height > max_height: max_height = height except: print(file) print('Maximum Width: ', max_width) print('Maximum Height: ', max_height) ``` ## 3. Convert image to OCR ``` import cv2 as cv import pytesseract pytesseract.pytesseract.tesseract_cmd=r'C:\Program Files\Tesseract-OCR\tesseract.exe' custom_config = r' --psm 6' # method to ocr def remove_header_bg(img): # convert image to hsv img_hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV) h, s, v = cv.split(img_hsv) # threshold saturation img thresh1 = cv.threshold(s, 92, 255, cv.THRESH_BINARY)[1] # threshold value img then invert thresh2 = cv.threshold(v, 128, 255, cv.THRESH_BINARY_INV)[1] # make mask mask = cv.add(thresh1, thresh2) # apply mask to remove unwanted background on figure processed_img = img.copy() processed_img[mask==0] = (255,255,255) lined_img = processed_img.copy() # convert to greyscale gray = cv.cvtColor(lined_img, cv.COLOR_BGR2GRAY) blur = cv.GaussianBlur(gray,(5,5),0) thresh = cv.threshold(blur, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU)[1] # remove horizontal lines hor_kernel = cv.getStructuringElement(cv.MORPH_RECT, (100,1)) remove_hor = cv.morphologyEx(thresh, cv.MORPH_OPEN, hor_kernel, iterations=2) cnts = cv.findContours(remove_hor, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE) cnts = cnts[0] if len(cnts) == 2 else cnts[1] for c in cnts: cv.drawContours(lined_img, [c], -1, (255,255,255), 5) # try to read text text = pytesseract.image_to_string(lined_img, config=custom_config) return text # get imgage files img_path = os.path.abspath('') imgs = [file for file in os.listdir(img_path) if file.endswith('.png')] imgs.sort() for img in imgs: fname = os.path.splitext(img)[0] image = cv.imread(img) title = remove_header_bg(image[1200:1700 , 100:5900]) header = remove_header_bg(image[1800:1950 , 100:5900]) contents = remove_header_bg(image[2100:7100 , 100:5900]) with open(f'{fname}.txt', 'a') as f: f.write(title) f.write(header) f.write(contents) print(fname,' converted') print('All img files converted') ```
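One possible extension (not in the original notebook) for checking OCR quality: pytesseract's `image_to_data` returns word-level confidence scores, which can help verify the hard-coded crop regions before running the full batch. A minimal sketch, assuming the same `custom_config`, the `imgs` file list, and the content crop coordinates used above; `low_confidence_words` and the threshold are hypothetical choices.

```
def low_confidence_words(img, threshold=60):
    """Return (word, confidence) pairs that tesseract is unsure about."""
    data = pytesseract.image_to_data(img, config=custom_config,
                                     output_type=pytesseract.Output.DICT)
    flagged = []
    for word, conf in zip(data['text'], data['conf']):
        conf = float(conf)
        if word.strip() and conf >= 0 and conf < threshold:
            flagged.append((word, conf))
    return flagged

# Spot-check the "contents" crop of the first page image
sample = cv.imread(imgs[0])
print(low_confidence_words(sample[2100:7100, 100:5900]))
```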
[Table of Contents](./table_of_contents.ipynb) # Smoothing ``` #format the book %matplotlib inline from __future__ import division, print_function from book_format import load_style load_style() ``` ## Introduction The performance of the Kalman filter is not optimal when you consider future data. For example, suppose we are tracking an aircraft, and the latest measurement deviates far from the current track, like so (I'll only consider 1 dimension for simplicity): ``` import matplotlib.pyplot as plt data = [10.1, 10.2, 9.8, 10.1, 10.2, 10.3, 10.1, 9.9, 10.2, 10.0, 9.9, 11.4] plt.plot(data) plt.xlabel('time') plt.ylabel('position'); ``` After a period of near steady state, we have a very large change. Assume the change is past the limit of the aircraft's flight envelope. Nonetheless the Kalman filter incorporates that new measurement into the filter based on the current Kalman gain. It cannot reject the noise because the measurement could reflect the initiation of a turn. Granted it is unlikely that we are turning so abruptly, but it is impossible to say whether * The aircraft started a turn awhile ago, but the previous measurements were noisy and didn't show the change. * The aircraft is turning, and this measurement is very noisy * The measurement is very noisy and the aircraft has not turned * The aircraft is turning in the opposite direction, and the measurement is extremely noisy Now, suppose the following measurements are: 11.3 12.1 13.3 13.9 14.5 15.2 ``` data2 = [11.3, 12.1, 13.3, 13.9, 14.5, 15.2] plt.plot(data + data2); ``` Given these future measurements we can infer that yes, the aircraft initiated a turn. On the other hand, suppose these are the following measurements. ``` data3 = [9.8, 10.2, 9.9, 10.1, 10.0, 10.3, 9.9, 10.1] plt.plot(data + data3); ``` In this case we are led to conclude that the aircraft did not turn and that the outlying measurement was merely very noisy. ## An Overview of How Smoothers Work The Kalman filter is a *recursive* filter with the Markov property - it's estimate at step `k` is based only on the estimate from step `k-1` and the measurement at step `k`. But this means that the estimate from step `k-1` is based on step `k-2`, and so on back to the first epoch. Hence, the estimate at step `k` depends on all of the previous measurements, though to varying degrees. `k-1` has the most influence, `k-2` has the next most, and so on. Smoothing filters incorporate future measurements into the estimate for step `k`. The measurement from `k+1` will have the most effect, `k+2` will have less effect, `k+3` less yet, and so on. This topic is called *smoothing*, but I think that is a misleading name. I could smooth the data above by passing it through a low pass filter. The result would be smooth, but not necessarily accurate because a low pass filter will remove real variations just as much as it removes noise. In contrast, Kalman smoothers are *optimal* - they incorporate all available information to make the best estimate that is mathematically achievable. ## Types of Smoothers There are three classes of Kalman smoothers that produce better tracking in these situations. * Fixed-Interval Smoothing This is a batch processing based filter. This filter waits for all of the data to be collected before making any estimates. For example, you may be a scientist collecting data for an experiment, and don't need to know the result until the experiment is complete. 
A fixed-interval smoother will collect all the data, then estimate the state at each measurement using all available previous and future measurements. If it is possible for you to run your Kalman filter in batch mode it is always recommended to use one of these filters a it will provide much better results than the recursive forms of the filter from the previous chapters. * Fixed-Lag Smoothing Fixed-lag smoothers introduce latency into the output. Suppose we choose a lag of 4 steps. The filter will ingest the first 3 measurements but not output a filtered result. Then, when the 4th measurement comes in the filter will produce the output for measurement 1, taking measurements 1 through 4 into account. When the 5th measurement comes in, the filter will produce the result for measurement 2, taking measurements 2 through 5 into account. This is useful when you need recent data but can afford a bit of lag. For example, perhaps you are using machine vision to monitor a manufacturing process. If you can afford a few seconds delay in the estimate a fixed-lag smoother will allow you to produce very accurate and smooth results. * Fixed-Point Smoothing A fixed-point filter operates as a normal Kalman filter, but also produces an estimate for the state at some fixed time $j$. Before the time $k$ reaches $j$ the filter operates as a normal filter. Once $k>j$ the filter estimates $x_k$ and then also updates its estimate for $x_j$ using all of the measurements between $j\dots k$. This can be useful to estimate initial paramters for a system, or for producing the best estimate for an event that happened at a specific time. For example, you may have a robot that took a photograph at time $j$. You can use a fixed-point smoother to get the best possible pose information for the camera at time $j$ as the robot continues moving. ## Choice of Filters The choice of these filters depends on your needs and how much memory and processing time you can spare. Fixed-point smoothing requires storage of all measurements, and is very costly to compute because the output is for every time step is recomputed for every measurement. On the other hand, the filter does produce a decent output for the current measurement, so this filter can be used for real time applications. Fixed-lag smoothing only requires you to store a window of data, and processing requirements are modest because only that window is processed for each new measurement. The drawback is that the filter's output always lags the input, and the smoothing is not as pronounced as is possible with fixed-interval smoothing. Fixed-interval smoothing produces the most smoothed output at the cost of having to be batch processed. Most algorithms use some sort of forwards/backwards algorithm that is only twice as slow as a recursive Kalman filter. ## Fixed-Interval Smoothing There are many fixed-lag smoothers available in the literature. I have chosen to implement the smoother invented by Rauch, Tung, and Striebel because of its ease of implementation and efficiency of computation. It is also the smoother I have seen used most often in real applications. This smoother is commonly known as an RTS smoother. Derivation of the RTS smoother runs to several pages of densely packed math. I'm not going to inflict it on you. Instead I will briefly present the algorithm, equations, and then move directly to implementation and demonstration of the smoother. The RTS smoother works by first running the Kalman filter in a batch mode, computing the filter output for each step. 
Given the filter output for each measurement along with the covariance matrix corresponding to each output the RTS runs over the data backwards, incorporating its knowledge of the future into the past measurements. When it reaches the first measurement it is done, and the filtered output incorporates all of the information in a maximally optimal form. The equations for the RTS smoother are very straightforward and easy to implement. This derivation is for the linear Kalman filter. Similar derivations exist for the EKF and UKF. These steps are performed on the output of the batch processing, going backwards from the most recent in time back to the first estimate. Each iteration incorporates the knowledge of the future into the state estimate. Since the state estimate already incorporates all of the past measurements the result will be that each estimate will contain knowledge of all measurements in the past and future. Here it is very important to distinguish between past, present, and future so I have used subscripts to denote whether the data is from the future or not. Predict Step $$\begin{aligned} \mathbf{P} &= \mathbf{FP}_k\mathbf{F}^\mathsf{T} + \mathbf{Q} \end{aligned}$$ Update Step $$\begin{aligned} \mathbf{K}_k &= \mathbf{P}_k\mathbf{F}^\mathsf{T}\mathbf{P}^{-1} \\ \mathbf{x}_k &= \mathbf{x}_k + \mathbf{K}_k(\mathbf{x}_{k+1} - \mathbf{Fx}_k) \\ \mathbf{P}_k &= \mathbf{P}_k + \mathbf{K}_k(\mathbf{P}_{k+1} - \mathbf{P})\mathbf{K}_k^\mathsf{T} \end{aligned}$$ As always, the hardest part of the implementation is correctly accounting for the subscripts. A basic implementation without comments or error checking would be: ```python from numpy import zeros, dot from numpy.linalg import inv def rts_smoother(Xs, Ps, F, Q): n, dim_x, _ = Xs.shape # smoother gain K = zeros((n, dim_x, dim_x)) x, P, Pp = Xs.copy(), Ps.copy(), Ps.copy() for k in range(n-2, -1, -1): Pp[k] = dot(F, P[k]).dot(F.T) + Q # predicted covariance K[k] = dot(P[k], F.T).dot(inv(Pp[k])) # smoother gain x[k] += dot(K[k], x[k+1] - dot(F, x[k])) P[k] += dot(K[k], P[k+1] - Pp[k]).dot(K[k].T) return (x, P, K, Pp) ``` This implementation mirrors the implementation provided in FilterPy. It assumes that the Kalman filter is being run externally in batch mode, and the results of the states and covariances are passed in via the `Xs` and `Ps` variables. Here is an example. ``` import numpy as np from numpy import random from numpy.random import randn import matplotlib.pyplot as plt from filterpy.kalman import KalmanFilter import kf_book.book_plots as bp def plot_rts(noise, Q=0.001, show_velocity=False): random.seed(123) fk = KalmanFilter(dim_x=2, dim_z=1) fk.x = np.array([0., 1.]) # state (x and dx) fk.F = np.array([[1., 1.], [0., 1.]]) # state transition matrix fk.H = np.array([[1., 0.]]) # Measurement function fk.P = 10. # covariance matrix fk.R = noise # measurement uncertainty fk.Q = Q # process uncertainty # create noisy data zs = np.asarray([t + randn()*noise for t in range(40)]) # filter data with Kalman filter, then run the smoother on it mu, cov, _, _ = fk.batch_filter(zs) M, P, C, _ = fk.rts_smoother(mu, cov) # plot data if show_velocity: index = 1 else: index = 0 if not show_velocity: bp.plot_measurements(zs, lw=1) plt.plot(M[:, index], c='b', label='RTS') plt.plot(mu[:, index], c='g', ls='--', label='KF output') if not show_velocity: N = len(zs) plt.plot([0, N], [0, N], 'k', lw=2, label='track') plt.legend(loc=4) plt.show() plot_rts(7.) ``` I've injected a lot of noise into the signal to allow you to visually distinguish the RTS output from the ideal output.
In the graph above we can see that the Kalman filter, drawn as the green dotted line, is reasonably smooth compared to the input, but it still wanders from from the ideal line when several measurements in a row are biased towards one side of the line. In contrast, the RTS output is both extremely smooth and very close to the ideal output. With a perhaps more reasonable amount of noise we can see that the RTS output nearly lies on the ideal output. The Kalman filter output, while much better, still varies by a far greater amount. ``` plot_rts(noise=1.) ``` However, we must understand that this smoothing is predicated on the system model. We have told the filter that what we are tracking follows a constant velocity model with very low process error. When the filter *looks ahead* it sees that the future behavior closely matches a constant velocity so it is able to reject most of the noise in the signal. Suppose instead our system has a lot of process noise. For example, if we are tracking a light aircraft in gusty winds its velocity will change often, and the filter will be less able to distinguish between noise and erratic movement due to the wind. We can see this in the next graph. ``` plot_rts(noise=7., Q=.1) ``` This underscores the fact that these filters are not *smoothing* the data in colloquial sense of the term. The filter is making an optimal estimate based on previous measurements, future measurements, and what you tell it about the behavior of the system and the noise in the system and measurements. Let's wrap this up by looking at the velocity estimates of Kalman filter vs the RTS smoother. ``` plot_rts(7.,show_velocity=True) ``` The improvement in the velocity, which is an hidden variable, is even more dramatic. ## Fixed-Lag Smoothing The RTS smoother presented above should always be your choice of algorithm if you can run in batch mode because it incorporates all available data into each estimate. Not all problems allow you to do that, but you may still be interested in receiving smoothed values for previous estimates. The number line below illustrates this concept. ``` from kf_book.book_plots import figsize from kf_book.smoothing_internal import * with figsize(y=2): show_fixed_lag_numberline() ``` At step $k$ we can estimate $x_k$ using the normal Kalman filter equations. However, we can make a better estimate for $x_{k-1}$ by using the measurement received for $x_k$. Likewise, we can make a better estimate for $x_{k-2}$ by using the measurements recevied for $x_{k-1}$ and $x_{k}$. We can extend this computation back for an arbitrary $N$ steps. Derivation for this math is beyond the scope of this book; Dan Simon's *Optimal State Estimation* [2] has a very good exposition if you are interested. The essense of the idea is that instead of having a state vector $\mathbf{x}$ we make an augmented state containing $$\mathbf{x} = \begin{bmatrix}\mathbf{x}_k \\ \mathbf{x}_{k-1} \\ \vdots\\ \mathbf{x}_{k-N+1}\end{bmatrix}$$ This yields a very large covariance matrix that contains the covariance between states at different steps. FilterPy's class `FixedLagSmoother` takes care of all of this computation for you, including creation of the augmented matrices. All you need to do is compose it as if you are using the `KalmanFilter` class and then call `smooth()`, which implements the predict and update steps of the algorithm. Each call of `smooth` computes the estimate for the current measurement, but it also goes back and adjusts the previous `N-1` points as well. 
The smoothed values are contained in the list `FixedLagSmoother.xSmooth`. If you use `FixedLagSmoother.x` you will get the most recent estimate, but it is not smoothed and is no different from a standard Kalman filter output. ``` from filterpy.kalman import FixedLagSmoother, KalmanFilter import numpy.random as random fls = FixedLagSmoother(dim_x=2, dim_z=1, N=8) fls.x = np.array([0., .5]) fls.F = np.array([[1.,1.], [0.,1.]]) fls.H = np.array([[1.,0.]]) fls.P *= 200 fls.R *= 5. fls.Q *= 0.001 kf = KalmanFilter(dim_x=2, dim_z=1) kf.x = np.array([0., .5]) kf.F = np.array([[1.,1.], [0.,1.]]) kf.H = np.array([[1.,0.]]) kf.P *= 200 kf.R *= 5. kf.Q *= 0.001 N = 4 # size of lag nom = np.array([t/2. for t in range (0, 40)]) zs = np.array([t + random.randn()*5.1 for t in nom]) for z in zs: fls.smooth(z) kf_x, _, _, _ = kf.batch_filter(zs) x_smooth = np.array(fls.xSmooth)[:, 0] fls_res = abs(x_smooth - nom) kf_res = abs(kf_x[:, 0] - nom) plt.plot(zs,'o', alpha=0.5, marker='o', label='zs') plt.plot(x_smooth, label='FLS') plt.plot(kf_x[:, 0], label='KF', ls='--') plt.legend(loc=4) print('standard deviation fixed-lag: {:.3f}'.format(np.mean(fls_res))) print('standard deviation kalman: {:.3f}'.format(np.mean(kf_res))) ``` Here I have set `N=8` which means that we will incorporate 8 future measurements into our estimates. This provides us with a very smooth estimate once the filter converges, at the cost of roughly 8x the amount of computation of the standard Kalman filter. Feel free to experiment with larger and smaller values of `N`. I chose 8 somewhat at random, not due to any theoretical concerns. ## References [1] H. Rauch, F. Tung, and C. Striebel. "Maximum likelihood estimates of linear dynamic systems," *AIAA Journal*, **3**(8), pp. 1445-1450 (August 1965). [2] Dan Simon. "Optimal State Estimation," John Wiley & Sons, 2006. http://arc.aiaa.org/doi/abs/10.2514/3.3166
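To close out the chapter, here is a self-contained NumPy sketch of the forward filter followed by the backward RTS pass, outside of FilterPy. The matrices and the noisy ramp track mirror the `plot_rts()` example above, but the loop bodies are my own illustration under those assumptions, not FilterPy's implementation.
```python
import numpy as np

np.random.seed(3)
F = np.array([[1., 1.], [0., 1.]])      # state transition (constant velocity)
H = np.array([[1., 0.]])                # measurement function
Q = np.eye(2) * 0.001                   # process noise
R = np.array([[7.**2]])                 # measurement noise variance
zs = np.array([t + np.random.randn()*7. for t in range(40)])

x = np.array([0., 1.])
P = np.eye(2) * 10.
xf, Pf = [], []
for z in zs:                            # forward (filter) pass
    x = F @ x
    P = F @ P @ F.T + Q
    S = H @ P @ H.T + R
    K = P @ H.T @ np.linalg.inv(S)
    x = x + K @ (np.array([z]) - H @ x)
    P = (np.eye(2) - K @ H) @ P
    xf.append(x.copy()); Pf.append(P.copy())

xs, Ps = [v.copy() for v in xf], [p.copy() for p in Pf]
for k in range(len(zs)-2, -1, -1):      # backward (RTS) pass
    Pp = F @ Pf[k] @ F.T + Q            # predicted covariance
    C = Pf[k] @ F.T @ np.linalg.inv(Pp) # smoother gain
    xs[k] = xf[k] + C @ (xs[k+1] - F @ xf[k])
    Ps[k] = Pf[k] + C @ (Ps[k+1] - Pp) @ C.T

print('filter   RMS position error:', np.sqrt(np.mean((np.array(xf)[:, 0] - np.arange(40))**2)))
print('smoother RMS position error:', np.sqrt(np.mean((np.array(xs)[:, 0] - np.arange(40))**2)))
```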
# Preparation ``` # Uncomment to pin the library versions #!pip install torch==1.7.0 #!pip install torchvision==0.8.1 import torch import torchvision # Check the versions print(torch.__version__) print(torchvision.__version__) # Mount Google Drive from google.colab import drive drive.mount('/content/gdrive') %cd '/content/gdrive/MyDrive/Colab Notebooks/gan_sample/chapter2' import os import numpy as np import torch import torch.nn as nn import torch.optim as optimizers import torch.nn.functional as F from torch.utils.data import Dataset, DataLoader import torchvision import torchvision.transforms as transforms import matplotlib import matplotlib.pyplot as plt %matplotlib inline ``` # Creating the dataset ``` np.random.seed(1234) torch.manual_seed(1234) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # Download the data root = os.path.join('data', 'mnist') transform = transforms.Compose([transforms.ToTensor(), lambda x: x.view(-1)]) mnist_train = \ torchvision.datasets.MNIST(root=root, download=True, train=True, transform=transform) mnist_test = \ torchvision.datasets.MNIST(root=root, download=True, train=False, transform=transform) train_dataloader = DataLoader(mnist_train, batch_size=100, shuffle=True) test_dataloader = DataLoader(mnist_test, batch_size=1, shuffle=False) ``` # Defining the network ``` class Autoencoder(nn.Module): def __init__(self, device='cpu'): super().__init__() self.device = device self.l1 = nn.Linear(784, 200) self.l2 = nn.Linear(200, 784) def forward(self, x): # Encoder h = self.l1(x) # Activation function h = torch.relu(h) # Decoder h = self.l2(h) # Map the output into the range 0-1 with the sigmoid function y = torch.sigmoid(h) return y ``` # Running the training ``` # Set up the model model = Autoencoder(device=device).to(device) # Set up the loss function criterion = nn.BCELoss() # Set up the optimizer optimizer = optimizers.Adam(model.parameters()) epochs = 10 # Loop over epochs for epoch in range(epochs): train_loss = 0. # Loop over mini-batches for (x, _) in train_dataloader: x = x.to(device) # Switch to training mode model.train() # Forward pass preds = model(x) # Loss between the input images x and the reconstructions preds loss = criterion(preds, x) # Reset the gradients optimizer.zero_grad() # Backpropagate the loss loss.backward() # Update the parameters optimizer.step() # Accumulate the training loss train_loss += loss.item() train_loss /= len(train_dataloader) print('Epoch: {}, Loss: {:.3f}'.format( epoch+1, train_loss )) ``` # Reconstructing images ``` # Fetch a batch from the dataloader x, _ = next(iter(test_dataloader)) x = x.to(device) # Switch to evaluation mode model.eval() # Reconstructed image x_rec = model(x) # Display the input image and the reconstructed image for i, image in enumerate([x, x_rec]): image = image.view(28, 28).detach().cpu().numpy() plt.subplot(1, 2, i+1) plt.imshow(image, cmap='binary_r') plt.axis('off') plt.show() ```
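As an optional follow-up sketch (assuming `model`, `test_dataloader`, and `device` from the cells above), the encoder half of the trained autoencoder can be reused on its own to inspect the 200-dimensional latent code and the reconstruction error on a test image.
```
model.eval()
with torch.no_grad():
    x, _ = next(iter(test_dataloader))
    x = x.to(device)
    z = torch.relu(model.l1(x))          # encoder half only: 784 -> 200
    x_rec = torch.sigmoid(model.l2(z))   # decoder half: 200 -> 784
    bce = F.binary_cross_entropy(x_rec, x)

print('latent shape:', z.shape)
print('test reconstruction BCE: {:.3f}'.format(bce.item()))
```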
# Optimization and gradient descent method ``` from IPython.display import IFrame IFrame(src="https://cdnapisec.kaltura.com/p/2356971/sp/235697100/embedIframeJs/uiconf_id/41416911/partner_id/2356971?iframeembed=true&playerId=kaltura_player&entry_id=1_wota11ay&flashvars[streamerType]=auto&amp;flashvars[localizationCode]=en&amp;flashvars[leadWithHTML5]=true&amp;flashvars[sideBarContainer.plugin]=true&amp;flashvars[sideBarContainer.position]=left&amp;flashvars[sideBarContainer.clickToClose]=true&amp;flashvars[chapters.plugin]=true&amp;flashvars[chapters.layout]=vertical&amp;flashvars[chapters.thumbnailRotator]=false&amp;flashvars[streamSelector.plugin]=true&amp;flashvars[EmbedPlayer.SpinnerTarget]=videoHolder&amp;flashvars[dualScreen.plugin]=true&amp;flashvars[hotspots.plugin]=1&amp;flashvars[Kaltura.addCrossoriginToIframe]=true&amp;&wid=1_o38cisoq",width='800', height='500') ``` ## Download the lecture notes here: [Notes](https://sites.psu.edu/math452/files/2021/12/A06GradientDescent_Video_Notes.pdf) ## Gradient descent method For simplicity, let us just consider a general optimization problem $$ \label{optmodel} \min_{x\in \mathbb{R}^n } f(x). $$ (problem) ![image](../figures/diag_GD.png) ### A general approach: line search method Given any initial guess $x_1$, the line search method uses the following algorithm $$ \eta_t= argmin_{\eta\in \mathbb{R}^1} f(x_t - \eta p_t)\qquad \mbox{(1D minimization problem)} $$ to produce $\{ x_{t}\}_{t=1}^{\infty}$ $$ x_{t+1} = x_{t} - \eta_t p_t. $$ (line-search) Here $\eta_t$ is called the step size in optimization and also learning rate in machine learn ing, $p_t$ is called the descent direction, which is the critical component of this algorithm. And $x_t$ tends to $$ x^*= argmin_{x\in \mathbb{R}^n} f(x) \iff f(x^*)=\min_{x\in \mathbb{R}^n} f(x) $$ as $t$ tends to infinity. There is a series of optimization algorithms which follow the above form just using different choices of $p_t$. Then, the next natural question is what a good choice of $p_t$ is? We have the following theorem to show why gradient direction is a good choice for $p_t$. ```{admonition} lemma Given $x \in \mathbb{R}^n$, if $\nabla f(x)\neq 0$, the fast descent direction of $f$ at $x$ is the negative gradient direction, namely $$ -\frac{\nabla f(x)}{\|\nabla f(x)\|} = \mathop{\arg\min}_{ p \in \mathbb{R}^n, \|p\|=1} \left. \frac{\partial f(x + \eta p)}{\partial \eta} \right|_{\eta=0}. $$ It means that $f(x)$ decreases most rapidly along the negative gradient direction. ``` ```{admonition} proof *Proof.* Let $p$ be a direction in $\mathbb{R}^{n},\|p\|=1$. Consider the local decrease of the function $f(\cdot)$ along direction $p$ $$ \Delta(p)=\lim _{\eta \downarrow 0} \frac{1}{\eta}\left(f(x+\eta p)-f(x)\right)=\left. \frac{\partial f(x + \eta p)}{\partial \eta} \right|_{\eta=0}. $$ Note that $$ \begin{split} \left. \frac{\partial f(x + \eta p)}{\partial \eta} \right|_{\eta=0}=\sum_{i=1}^n\left. \frac{\partial f}{\partial x_i}(x + \eta p)p_i \right|_{\eta=0} =(\nabla f, p), \end{split} $$ which means that $$ f(x+\eta p)-f(x)=\eta(\nabla f(x), p)+o(\eta) . $$ Therefore $$ \Delta(p)=(\nabla f(x), p). $$ Using the Cauchy-Schwarz inequality $-\|x\| \cdot\|y\| \leq( x, y) \leq\|x\| \cdot\|y\|,$ we obtain $$ -\|\nabla f(x)\| \le (\nabla f(x), p)\le \|\nabla f(x)\| . $$ Let us take $$ \bar{p}=-\nabla f(x) /\|\nabla f(x)\|. $$ Then $$ \Delta(\bar{p})=-(\nabla f(x), \nabla f(x)) /\|\nabla f(x)\|=-\|\nabla f(x)\|. 
$$ The direction $-\nabla f(x)$ (the antigradient) is the direction of the fastest local decrease of the function $f(\cdot)$ at point $x.$ ◻ ``` Here is a simple diagram for this property. Since at each point, $f(x)$ decreases most rapidly along the negative gradient direction, it is then natural to choose the search direction in {eq}`line-search` in the negative gradient direction and the resulting algorithm is the so-called gradient descent method. ```{prf:algorithm} Algrihthm :label: my_algorithm1 Given the initial guess $x_0$, learning rate $\eta_t>0$ **For** t=1,2,$\cdots$, $$ x_{t+1} = x_{t} - \eta_{t} \nabla f({x}_{t}), $$ ``` In practice, we need a "stopping criterion" that determines when the above gradient descent method to stop. One possibility is > **While** $S(x_t; f) = \|\nabla f(x_t)\|\le \epsilon$ or $t \ge T$ for some small tolerance $\epsilon>0$ or maximal number of iterations $T$. In general, a good stopping criterion is hard to come by and it is a subject that has called a lot of research in optimization for machine learning. In the gradient method, the scalar factors for the gradients, $\eta_{t},$ are called the step sizes. Of course, they must be positive. There are many variants of the gradient method, which differ one from another by the step-size strategy. Let us consider the most important examples. 1. The sequence $\left\{\eta_t\right\}_{t=0}^{\infty}$ is chosen in advance. For example, (constant step) $$ \eta_t=\frac{\eta}{\sqrt{t+1}}; $$ 2. Full relaxation: $$ \eta_t=\arg \min _{\eta \geq 0} f\left(x_t-\eta \nabla f\left(x_t\right)\right); $$ 3. The Armijo rule: Find $x_{t+1}=x_t-\eta \nabla f\left(x_t\right)$ with $\eta>0$ such that $$ \alpha\left(\nabla f\left(x_t\right), x_t-x_{t+1}\right) \leq f\left(x_t\right)-f\left(x_{t+1}\right), $$ $$ \beta\left(\nabla f\left(x_t\right), x_t-x_{t+1}\right) \geq f\left(x_t\right)-f\left(x_{t+1}\right), $$ where $0<\alpha<\beta<1$ are some fixed parameters. Comparing these strategies, we see that 1. The first strategy is the simplest one. It is often used in the context of convex optimization. In this framework, the behavior of functions is much more predictable than in the general nonlinear case. 2. The second strategy is completely theoretical. It is never used in practice since even in one-dimensional case we cannot find the exact minimum in finite time. 3. The third strategy is used in the majority of practical algorithms. It has the following geometric interpretation. Let us fix $x \in \mathbb{R}^{n}$ assuming that $\nabla f(x) \neq 0$. Consider the following function of one variable: $$ \phi (\eta)=f(x-\eta \nabla f(x)),\quad \eta\ge0. $$ Then the step-size values acceptable for this strategy belong to the part of the graph of $\phi$ which is located between two linear functions: $$ \phi_{1}(\eta)=f(x)-\alpha \eta\|\nabla f(x)\|^{2}, \quad \phi_{2}(\eta)=f(x)-\beta \eta\|\nabla f(x)\|^{2} $$ Note that $\phi(0)=\phi_{1}(0)=\phi_{2}(0)$ and $\phi^{\prime}(0)<\phi_{2}^{\prime}(0)<\phi_{1}^{\prime}(0)<0 .$ Therefore, the acceptable values exist unless $\phi(\cdot)$ is not bounded below. There are several very fast one-dimensional procedures for finding a point satisfying the Armijo conditions. However, their detailed description is not important for us now. ## Convergence of Gradient Descent method Now we are ready to study the rate of convergence of unconstrained minimization schemes. For the optimization problem {eq}`problem` $$ \min_{x\in \mathbb{R}^n} f(x). $$ We assume that $f(x)$ is convex. 
Then we say that $x^*$ is a minimizer if $$ f(x^*) = \min_{x \in \mathbb{R}^n} f(x). $$ For minimizer $x^*$, we have $$ \label{key} \nabla f(x^*) = 0. $$ We have the next two properties of the minimizer for convex functions: 1. If $f(x) \ge c_0$, for some $c_0 \in \mathbb{R}$, then we have $$ \mathop{\arg\min} f \neq \emptyset. $$ 2. If $f(x)$ is $\lambda$-strongly convex, then $f(x)$ has a unique minimizer, namely, there exists a unique $x^*\in \mathbb{R}^n$ such that $$ f(x^*) = \min_{x\in \mathbb{R}^n }f(x). $$ To investigate the convergence of gradient descent method, let us recall the gradient descent method: ```{prf:algorithm} Algorithm :label: my_algorithm2 **For**: $t = 1, 2, \cdots$ $$ \label{equ:fgd-iteration} x_{t+1} = x_{t} - \eta_t \nabla f(x_t), $$ where $\eta_t$ is the stepsize / learning rate. ``` We have the next theorem about the convergence of gradient descent method under the Assumption. ```{admonition} Theorem For Gradient Descent Algorithm {prf:ref}`my_algorithm2` , if $f(x)$ satisfies Assumption, then $$ \|x_t - x^*\|^2 \le \alpha^t \|x_0 - x^*\|^2 $$ if $0<\eta_t <\frac{2\lambda}{L^2}$ and $\alpha < 1$. Particularly, if $\eta_t = \frac{\lambda}{L^2}$, then $$ \|x_t - x^*\|^2 \le \left(1 - \frac{\lambda^2}{L^2}\right)^t \|x_0 - x^*\|^2. $$ ``` ```{admonition} Proof *Proof.* Note that $$ x_{t+1} - x = x_{t} - \eta_t \nabla f(x_t) - x. $$ By taking $L^2$ norm for both sides, we get $$ \|x_{t+1} - x \|^2 = \|x_{t} - \eta_t \nabla f(x_t) - x \|^2. $$ Let $x = x^*$. It holds that $$ \begin{aligned} \|x_{t+1} - x^* \|^2 &= \| x_{t} - \eta_t \nabla f(x_t) - x^* \|^2 \\ &= \|x_t-x^*\|^2 - 2\eta_t \nabla f(x_t)^\top (x_t - x^*) + \eta_t^2 \|\nabla f(x_t) - \nabla f(x^*)\|^2 \qquad \mbox{ (by $\nabla f(x^*)=0$)}\\ &\le \|x_t - x^*\|^2 - 2\eta_t \lambda \|x_t - x^*\|^2 + \eta_t ^2 L^2 \|x_t - x^*\|^2 \quad \mbox{(by $\lambda$- strongly convex and Lipschitz)}\\ &\le (1 - 2\eta_t \lambda + \eta_t^2 L^2) \|x_t - x^*\|^2 =\alpha \|x_t - x^*\|^2, \end{aligned} $$ where $$ \alpha = \left(L^2 (\eta_t -{\lambda\over L^2})^2 + 1-{\lambda^2\over L^2}\right)<1\ \mbox{if } 0< \eta_t<\frac{2\lambda}{L^2}. $$ Particularly, if $\eta_t =\frac{\lambda}{L^2}$, $$ \alpha=1-{\lambda^2\over L^2}, $$ which finishes the proof. ◻ ``` This means that if the learning rate is chosen appropriatly, $\{x_t\}_{t=1}^\infty$ from the gradient descent method will converge to the minimizer $x^*$ of the function. There are some issues on Gradient Descent method: - $\nabla f(x_{t})$ is very expensive to compute. - Gradient Descent method does not yield generalization accuracy. The stochastic gradient descent (SGD) method in the next section will focus on these two issues.
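A minimal sketch of the iteration $x_{t+1} = x_t - \eta_t \nabla f(x_t)$ on a strongly convex quadratic, using the sufficient-decrease half of the Armijo rule as a backtracking step-size choice. The matrix `A`, vector `b`, and parameter values below are arbitrary choices for illustration, not part of the lecture.
```
import numpy as np

A = np.array([[3.0, 0.5],
              [0.5, 1.0]])           # symmetric positive definite, so f is strongly convex
b = np.array([1.0, -2.0])

f = lambda x: 0.5 * x @ A @ x - b @ x
grad = lambda x: A @ x - b
x_star = np.linalg.solve(A, b)       # exact minimizer, used only to monitor the error

def armijo_step(x, g, alpha=0.3, shrink=0.5, eta=1.0):
    # Shrink eta until f(x) - f(x - eta*g) >= alpha * eta * ||g||^2 (sufficient decrease)
    while f(x) - f(x - eta * g) < alpha * eta * (g @ g):
        eta *= shrink
    return eta

x = np.array([5.0, 5.0])
for t in range(50):
    g = grad(x)
    if np.linalg.norm(g) < 1e-8:     # stopping criterion ||grad f(x_t)|| <= eps
        break
    x = x - armijo_step(x, g) * g

print('iterations:', t, ' distance to minimizer:', np.linalg.norm(x - x_star))
```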
``` import numpy as np import pandas as pd import random df = pd.read_csv('/Users/josephbell/Downloads/iris.csv') df = df.drop("Id", axis = 1) df = df.rename(columns = {"Species" : "target"}) df.head() # train test split def train_test_split(df, target, test_size): # shuffles data random_df = df.sample(frac=1) # splits data into train and test based on test size % test_split = int(test_size * len(df)) train_df = random_df[test_split:] test_df = random_df[:test_split] return train_df, test_df train_df, test_df = train_test_split(df, target='target', test_size=.2) # check to see that data is split properly train_df.shape, test_df.shape data = train_df.values data[:5] class Node(object): def __init__(self, target=None, attribute=None, splitvalue=None, left=None, right=None): self.target = target self.attribute = attribute self.splitvalue = splitvalue self.left = left self.right = right def set_target(self, target): self.target = target def set_attribute(self, attribute, splitvalue): self.attribute = attribute self.splitvalue = splitvalue class DecisionTree(): def __init__(self, min_samples_split=2): self.min_samples_split = min_samples_split # is the data pure meaning does the split contain only 1 class? def check_purity(self, data): # access all the rows of the target column of the data target_column = data[:, -1] # determine the number of unique classes unique_classes = np.unique(target_column) # if the number of unique classes is equal to 1 if len(unique_classes) == 1: # the data is pure, return True return True else: # the data is not pure, return False return False def calculate_entropy(self, data): # access all the rows of the target column of the data target_column = data[:, -1] # determine the number of unique classes _, counts = np.unique(target_column, return_counts=True) # get probabilites of each class probabilities = counts / counts.sum() entropy = sum(probabilities * -np.log2(probabilities)) return entropy def info_gain(self, data, column_index, splitval): split_column_values = data[:, column_index] data_left = data[split_column_values <= splitval] data_right = data[split_column_values > splitval] data_points = len(data_left) + len(data_right) p_data_left = len(data_left) / data_points p_data_right = len(data_right) / data_points info_gain = self.calculate_entropy(data) - (p_data_right * self.calculate_entropy(data_right) + p_data_left * self.calculate_entropy(data_left)) return info_gain def find_best_split(self, data): bestgain = 0 _, n_columns = data.shape for column_index in range(n_columns-1): values = data[:, column_index] unique_values = np.unique(values) for i in range(1,len(unique_values)): splitval = (unique_values[i-1] + unique_values[i]) / 2 gain = self.info_gain(data, column_index, splitval) if gain >= bestgain: bestgain = gain bestattribute = column_index bestsplitval = splitval return bestattribute, bestsplitval def fit(self, data): if len(data) < self.min_samples_split or self.check_purity(data): node = Node() count = 0 target_column = data[:, -1] unique_classes, counts_unique_classes = np.unique(target_column, return_counts = True) index = counts_unique_classes.argmax() for i in counts_unique_classes: if counts_unique_classes[index] == i: count+=1 if count == 1: node.set_target(unique_classes[index]) return node node = Node() column_index ,split = self.find_best_split(data) node.set_attribute(attribute = column_index, splitvalue = split) node.left = self.fit(data[data[:, column_index] < split]) node.right = self.fit(data[data[:, column_index] > split]) return 
node def get_target(self, row, n): while n.target is None: if row[n.attribute] <= n.splitvalue: n = n.left else: n = n.right return n.target def predict(self, tree, X_test): targets = [] for i in X_test: target = self.get_target(i, tree) targets.append(target) return targets def acc_score(self, y_true, y_pred): acc_score = np.sum(y_true == y_pred) / len(y_pred) return acc_score data = train_df.values X_test = test_df.values[:, :-1] y_test = test_df['target'].values tree = DecisionTree(min_samples_split=10) root = tree.fit(data) y_pred = tree.predict(root, X_test) acc_score = tree.acc_score(y_test, y_pred) print(acc_score) from sklearn.datasets import load_iris from sklearn.metrics import accuracy_score from sklearn.tree import DecisionTreeClassifier X_train = data[:, :-1] y_train = data[:, -1] X_test = test_df.values[:, :-1] y_test = test_df['target'].values clf = DecisionTreeClassifier(min_samples_split=10) clf = clf.fit(X_train, y_train) y_pred = clf.predict(X_test) print(accuracy_score(y_test, y_pred)) ```
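A quick sanity check, not part of the original notebook and assuming the `DecisionTree` class defined above, that the entropy and information-gain helpers return the expected values on a tiny hand-made dataset where a single split separates the classes perfectly.
```
import numpy as np

toy = np.array([
    # feature, target
    [1.0, 0],
    [2.0, 0],
    [3.0, 1],
    [4.0, 1],
])

dt = DecisionTree()
print(dt.check_purity(toy))        # False: two classes are present
print(dt.calculate_entropy(toy))   # 1.0 bit for a 50/50 class split
print(dt.info_gain(toy, 0, 2.5))   # 1.0: splitting at 2.5 separates the classes perfectly
print(dt.find_best_split(toy))     # (0, 2.5)
```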
# One-step error probability Write a computer program implementing asynchronous deterministic updates for a Hopfield network. Use Hebb's rule with $w_{ii}=0$. Generate and store p=[12,24,48,70,100,120] random patterns with N=120 bits. Each bit is either +1 or -1 with probability $\tfrac{1}{2}$. For each value of ppp estimate the one-step error probability $P_{\text {error}}^{t=1}$ based on $10^5$ independent trials. Here, one trial means that you generate and store a set of p random patterns, feed one of them, and perform one asynchronous update of a single randomly chosen neuron. If in some trials you encounter sgn(0), simply set sgn(0)=1. List below the values of $P_{\text {error}}^{t=1}$ that you obtained in the following form: [$p_1,p_2,\ldots,p_{6}$], where $p_n$ is the value of $P_{\text {error}}^{t=1}$ for the n-th value of p from the list above. Give four decimal places for each $p_n$ ``` import numpy as np import time def calculate_instance( n, p, zero_diagonal): #Create p random patterns patterns = [] for i in range(p): patterns.append(np.random.choice([-1,1],n)) #Create weights matrix according to hebbs rule weights = patterns[0][:,None]*patterns[0] for el in patterns[1:]: weights = weights + el[:,None]*el weights = np.true_divide(weights, n) #Fill diagonal with zeroes if zero_diagonal: np.fill_diagonal(weights,0) #Feed random pattern as input and test if an error occurs S1 = patterns[0] chosen_i = np.random.choice(range(n)) S_i_old = S1[chosen_i] S_i = esign(np.dot(weights[chosen_i], S1)) #breakpoint() return S_i_old == S_i def esign(x): if(x == 0): return 1 else: return np.sign(x) ``` List your numerically computed $P_{\text {error}}^{t=1}$ for the parameters given above. ``` p = [12, 24, 48, 70, 100, 120] N = 120 I = 100000 for p_i in p: solve = [0,0] for i in range(I): ret = calculate_instance(N, p_i, True) if ret: solve[0]+=1 else: solve[1]+=1 p_error = float(solve[1]/I) print(f"Number of patterns: {p_i}, P_error(t=1): {p_error} ") ``` Repeat the task, but now apply Hebb's rule without setting the diagonal weights to zero. For each value of p listed above, estimate the one-step error probability $P_{\text {error}}^{t=1}$ based on $10^5$ independent trials. ``` p = [12, 24, 48, 70, 100, 120] N = 120 I = 100000 for p_i in p: solve = [0,0] for i in range(I): ret = calculate_instance(N, p_i, False) if ret: solve[0]+=1 else: solve[1]+=1 p_error = float(solve[1]/I) print(f"Number of patterns: {p_i}, P_error(t=1): {p_error} ") ```
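For comparison with the simulated values, the classic signal-to-noise estimate for Hebbian storage with zero diagonal treats the cross-talk term as Gaussian and gives $P_{\text{error}}^{t=1} \approx \tfrac{1}{2}\,\mathrm{erfc}\big(\sqrt{N/(2p)}\big)$. The snippet below prints this approximation for the parameter values used above; it is an estimate added here for reference, not part of the original exercise, and not the exact value.
```
import numpy as np
from scipy.special import erfc

N = 120
for p_i in [12, 24, 48, 70, 100, 120]:
    p_theory = 0.5 * erfc(np.sqrt(N / (2 * p_i)))   # Gaussian cross-talk approximation
    print(f"p = {p_i:3d}  theoretical P_error(t=1) ~ {p_theory:.4f}")
```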
# Acquiring Data from open repositories A crucial step in the work of a computational biologist is not only to analyse data, but acquiring datasets to analyse as well as toy datasets to test out computational methods and algorithms. The internet is full of such open datasets. Sometimes you have to sign up and make a user to get authentication, especially for medical data. This can sometimes be time consuming, so here we will deal with easy access resources, mostly of modest size. Multiple python libraries provide a `dataset` module which makes the effort to fetch online data extremely seamless, with little requirement for preprocessing. #### Goal of the notebook Here you will get familiar with some ways to fetch datasets from online. We do some data exploration on the data just for illustration, but the methods will be covered later. # Useful resources and links When playing around with algorithms, it can be practical to use relatively small datasets. A good example is the `datasets` submodule of `scikit-learn`. `Nilearn` (library for neuroimaging) also provides a collection of neuroimaging datasets. Many datasets can also be acquired through the competition website [Kaggle](https://www.kaggle.com), in which they describe how to access the data. ### Links - [OpenML](https://www.openml.org/search?type=data) - [Nilearn datasets](https://nilearn.github.io/modules/reference.html#module-nilearn.datasets) - [Sklearn datasets](https://scikit-learn.org/stable/modules/classes.html?highlight=datasets#module-sklearn.datasets) - [Kaggle](https://www.kaggle.com/datasets) - [MEDNIST] - [**Awesomedata**](https://github.com/awesomedata/awesome-public-datasets) - We strongly recommend to check out the Awesomedata lists of public datasets, covering topics such as [biology/medicine](https://github.com/awesomedata/awesome-public-datasets#biology) and [neuroscience](https://github.com/awesomedata/awesome-public-datasets#neuroscience) - [Papers with code](https://paperswithcode.com) - [SNAP](https://snap.stanford.edu/data/) - Stanford Large Network Dataset Collection - [Open Graph Benchmark (OGB)](https://github.com/snap-stanford/ogb) - Network datasets - [Open Neuro](https://openneuro.org/) - [Open fMRI](https://openfmri.org/dataset/) ``` # import basic libraries import numpy as np import pandas as pd from matplotlib import pyplot as plt ``` We start with scikit-learn's datasets for testing out ML algorithms. Visit [here](https://scikit-learn.org/stable/modules/classes.html?highlight=datasets#module-sklearn.datasets) for an overview of the datasets. ``` from sklearn.datasets import fetch_olivetti_faces, fetch_20newsgroups, load_breast_cancer, load_diabetes, load_digits, load_iris ``` Load the MNIST dataset (images of hand written digits) ``` X,y = load_digits(return_X_y=True) y.shape X.shape #1797 images, 64 pixels per image ``` #### exercise 1. Make a function `plot` taking an argument (k) to visualize the k'th sample. It is currently flattened, you will need to reshape it. Use `plt.imshow` for plotting. ``` # %load solutions/ex2_1.py def plot(k): plt.imshow(X[k].reshape(8,8), cmap='gray') plt.title(f"Number = {y[k]}") plt.show() plot(15); plot(450) faces = fetch_olivetti_faces() ``` #### Exercise 2. Inspect the dataset. How many classes are there? How many samples per class? Also, plot some examples. What do the classes represent? ``` # %load solutions/ex2_2.py # example solution. 
# You are not expected to make a nice plotting function, # you can simply call plt.imshow a number of times and observe print(faces.DESCR) # this shows there are 40 classes, 10 samples per class print(faces.target) #the targets i.e. classes print(np.unique(faces.target).shape) # another way to see n_classes X = faces.images y = faces.target fig = plt.figure(figsize=(16,5)) idxs = [0,1,2, 11,12,13, 40,41] for i,k in enumerate(idxs): ax=fig.add_subplot(2,4,i+1) ax.imshow(X[k]) ax.set_title(f"target={y[k]}") # looking at a few plots shows that each target is a single person. ``` Once you have made yourself familiar with the dataset you can do some data exploration with unsupervised methods, like below. The next few lines of code are simply for illustration, don't worry about the code (we will cover unsupervised methods in submodule F). ``` from sklearn.decomposition import randomized_svd X = faces.data n_dim = 3 u, s, v = randomized_svd(X, n_dim) ``` Now we have factorized the images into their constituent parts. The code below displays the various components isolated one by one. ``` def show_ims(ims): fig = plt.figure(figsize=(16,10)) idxs = [0,1,2, 11,12,13, 40,41,42, 101,102,103] for i,k in enumerate(idxs): ax=fig.add_subplot(3,4,i+1) ax.imshow(ims[k]) ax.set_title(f"target={y[k]}") for i in range(n_dim): my_s = np.zeros(s.shape[0]) my_s[i] = s[i] recon = u @ np.diag(my_s) @ v recon = recon.reshape(400,64,64) show_ims(recon) ``` Are you able to see what the components represent? It at least looks like the second component signifies the lighting (the light direction), and the third highlights eyebrows and facial chin shape. ``` from sklearn.manifold import TSNE tsne = TSNE(init='pca', random_state=0) trans = tsne.fit_transform(X) m = 8*10 # the first 8 people (10 images each) plt.figure(figsize=(16,10)) xs, ys = trans[:m,0], trans[:m,1] plt.scatter(xs, ys, c=y[:m], cmap='rainbow') for i,v in enumerate(zip(xs,ys, y[:m])): xx,yy,s = v #plt.text(xx,yy,s) #class plt.text(xx,yy,i) #index ``` Many people seem to have multiple subclusters. What is the difference between those clusters? (e.g. 68,62,65 versus the other 60's) ``` ims = faces.images idxs = [68,62,65,66,60,64,63] #idxs = [9,4,1, 5,3] for k in idxs: plt.imshow(ims[k], cmap='gray') plt.show() def show(im): return plt.imshow(im, cmap='gray') import pandas as pd df = pd.read_csv('data/archive/covid_impact_on_airport_traffic.csv') df.shape df.describe() df.head() df.Country.unique() df.ISO_3166_2.unique() df.AggregationMethod.unique() ``` Here we will look at [OpenML](https://www.openml.org/) - a repository of open datasets that are free to explore and use for testing methods. ### Fetching an OpenML dataset We need to pass in a dataset ID to access it, as follows: ``` from sklearn.datasets import fetch_openml ``` OpenML contains all sorts of datatypes. 
By browsing the website we found an electroencephalography (EEG) dataset to explore: ``` data_id = 1471 #this was found by browsing OpenML dataset = fetch_openml(data_id=data_id, as_frame=True) dir(dataset) dataset.url type(dataset) print(dataset.DESCR) original_names = ['AF3', 'F7', 'F3', 'FC5', 'T7', 'P', 'O1', 'O2', 'P8', 'T8', 'FC6', 'F4', 'F8', 'AF4'] dataset.feature_names df = dataset.frame df.head() df.shape[0] / 117 # 128 frames per second df = dataset.frame y = df.Class #df.drop(columns='Class', inplace=True) df.dtypes #def summary(s): # print(s.max(), s.min(), s.mean(), s.std()) # print() # #for col in df.columns[:-1]: # column = df.loc[:,col] # summary(column) df.plot() ``` From the plot we can quickly identify a bunch of huge outliers, making the plot look completely useless. We assume these are artifacts, and remove them. ``` df2 = df.iloc[:,:-1].clip(upper=6000) df2.plot() ``` Now we see better what is going on. Let's just remove the frames corresponding to those outliers. ``` frames = np.nonzero(np.any(df.iloc[:,:-1].values>5000, axis=1))[0] frames df.drop(index=frames, inplace=True) df.plot(figsize=(16,8)) plt.legend(labels=original_names) df.columns ``` ### Do some modelling of the data ``` from sklearn.linear_model import LogisticRegression logreg = LogisticRegression(penalty='l2') X = df.values[:,:-1] y = df.Class y = y.astype(int) - 1 # map to 0,1 print(X.shape) print(y.shape) logreg.fit(X,y) comp = (logreg.predict(X) == y).values np.sum(comp.astype(int))/y.shape[0] # poor accuracy logreg.coef_[0].shape names = dataset.feature_names original_names coef = logreg.coef_[0] plt.barh(range(coef.shape[0]), coef) plt.yticks(ticks=range(14),labels=original_names) plt.show() ``` Interpreting the coefficients: we naturally tend to read the magnitude of the coefficients as feature importance. That is a fair interpretation, but we did not scale our features to a comparable range prior to fitting the model, so we cannot draw that conclusion here. ### Extra exercise. Go to [OpenML](https://openml.org) and use the search function (or just look around) to find any dataset that interests you. Load it using the above methodology, and try to do anything you can to understand the data, visualize it, etc. ``` ### YOUR CODE HERE ```
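A follow-up sketch that addresses the scaling caveat above (assuming `X`, `y`, and `original_names` from the cells in this section): standardizing each channel before fitting puts the coefficients on a comparable scale, so their magnitudes can be read as rough importances.
```
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt

# Standardize each EEG channel, then fit the same L2-penalized logistic regression
scaled_model = make_pipeline(StandardScaler(), LogisticRegression(penalty='l2', max_iter=1000))
scaled_model.fit(X, y)

coef = scaled_model.named_steps['logisticregression'].coef_[0]
plt.barh(range(coef.shape[0]), coef)
plt.yticks(ticks=range(len(original_names)), labels=original_names)
plt.title('Coefficients after standardization')
plt.show()
```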
# View Campaign and Interactions In the first notebook `Personalize_BuildCampaign.ipynb` you successfully built and deployed a recommendation model using deep learning with Amazon Personalize. This notebook will expand on that and will walk you through adding the ability to react to real time behavior of users. If their intent changes while browsing a movie, you will see revised recommendations based on that behavior. It will also showcase demo code for simulating user behavior selecting movies before the recommendations are returned. Below we start with just importing libraries that we need to interact with Personalize ``` # Imports import boto3 import json import numpy as np import pandas as pd import time import uuid ``` Below you will paste in the campaign ARN that you used in your previous notebook. Also pick a random user ID from 50 - 300. Lastly you will also need to find your Dataset Group ARN from the previous notebook. ``` # Setup and Config # Recommendations from Event data personalize = boto3.client('personalize') personalize_runtime = boto3.client('personalize-runtime') HRNN_Campaign_ARN = "arn:aws:personalize:us-east-1:930444659029:campaign/DEMO-campaign" # Define User USER_ID = "676" # Dataset Group Arn: datasetGroupArn = "arn:aws:personalize:us-east-1:930444659029:dataset-group/DEMO-dataset-group" # Establish a connection to Personalize's Event Streaming personalize_events = boto3.client(service_name='personalize-events') ``` ## Creating an Event Tracker Before your recommendation system can respond to real time events you will need an event tracker, the code below will generate one and can be used going forward with this lab. Feel free to name it something more clever. ``` response = personalize.create_event_tracker( name='MovieClickTracker', datasetGroupArn=datasetGroupArn ) print(response['eventTrackerArn']) print(response['trackingId']) TRACKING_ID = response['trackingId'] ``` ## Configuring Source Data Above you'll see your tracking ID and this has been assigned to a variable so no further action is needed by you. The lines below are going to setup the data used for recommendations so you can render the list of movies later. ``` data = pd.read_csv('./ml-20m/ratings.csv', sep=',', dtype={'userid': "int64", 'movieid': "int64", 'rating': "float64", 'timestamp': "int64"}) pd.set_option('display.max_rows', 5) data.rename(columns = {'userId':'USER_ID','movieId':'ITEM_ID','rating':'RATING','timestamp':'TIMESTAMP'}, inplace = True) data = data[data['RATING'] > 3] # keep only movies rated 3 data = data[['USER_ID', 'ITEM_ID', 'TIMESTAMP']] # select columns that match the columns in the schema below data items = pd.read_csv('./ml-20m/movies.csv', sep=',', usecols=[0,1], header=0) items.columns = ['ITEM_ID', 'TITLE'] user_id, item_id, _ = data.sample().values[0] item_title = items.loc[items['ITEM_ID'] == item_id].values[0][-1] print("USER: {}".format(user_id)) print("ITEM: {}".format(item_title)) items ``` ## Getting Recommendations Just like in the previous notebook it is a great idea to get a list of recommendatiosn first and then see how additional behavior by a user alters the recommendations. 
``` # Get Recommendations as is get_recommendations_response = personalize_runtime.get_recommendations( campaignArn = HRNN_Campaign_ARN, userId = USER_ID, ) item_list = get_recommendations_response['itemList'] title_list = [items.loc[items['ITEM_ID'] == np.int(item['itemId'])].values[0][-1] for item in item_list] print("Recommendations: {}".format(json.dumps(title_list, indent=2))) print(item_list) ``` ## Simulating User Behavior The lines below provide a code sample that simulates a user interacting with a particular item, you will then get recommendations that differ from those when you started. ``` session_dict = {} def send_movie_click(USER_ID, ITEM_ID): """ Simulates a click as an envent to send an event to Amazon Personalize's Event Tracker """ # Configure Session try: session_ID = session_dict[USER_ID] except: session_dict[USER_ID] = str(uuid.uuid1()) session_ID = session_dict[USER_ID] # Configure Properties: event = { "itemId": str(ITEM_ID), } event_json = json.dumps(event) # Make Call personalize_events.put_events( trackingId = TRACKING_ID, userId= USER_ID, sessionId = session_ID, eventList = [{ 'sentAt': int(time.time()), 'eventType': 'EVENT_TYPE', 'properties': event_json }] ) ``` Immediately below this line will update the tracker as if the user has clicked a particular title. ``` # Pick a movie, we will use ID 1653 or Gattica send_movie_click(USER_ID=USER_ID, ITEM_ID=1653) ``` After executing this block you will see the alterations in the recommendations now that you have event tracking enabled and that you have sent the events to the service. ``` get_recommendations_response = personalize_runtime.get_recommendations( campaignArn = HRNN_Campaign_ARN, userId = str(USER_ID), ) item_list = get_recommendations_response['itemList'] title_list = [items.loc[items['ITEM_ID'] == np.int(item['itemId'])].values[0][-1] for item in item_list] print("Recommendations: {}".format(json.dumps(title_list, indent=2))) print(item_list) ``` ## Conclusion You can see now that recommendations are altered by changing the movie that a user interacts with, this system can be modified to any application where users are interacting with a collection of items. These tools are available at any time to pull down and start exploring what is possible with the data you have. Finally when you are ready to remove the items from your account, open the `Cleanup.ipynb` notebook and execute the steps there. ``` eventTrackerArn = response['eventTrackerArn'] print("Tracker ARN is: " + str(eventTrackerArn)) ```
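A small helper, not part of the original walkthrough, to make the effect of the simulated click easier to see: it compares the recommendation list captured before the event with the one captured after. `titles_before` is a hypothetical variable you would need to save yourself (e.g. `titles_before = title_list`) before calling `send_movie_click()`.
```
def compare_recommendations(titles_before, titles_after):
    """Print new entries, rank changes, and dropped titles between two recommendation lists."""
    before_rank = {t: i for i, t in enumerate(titles_before)}
    for i, title in enumerate(titles_after):
        if title not in before_rank:
            print(f"{i:2d}  NEW        {title}")
        elif before_rank[title] != i:
            print(f"{i:2d}  {before_rank[title]:2d} -> {i:<2d}   {title}")
    dropped = [t for t in titles_before if t not in titles_after]
    if dropped:
        print("Dropped:", ", ".join(dropped))

# Example usage (assumes titles_before was saved before the event was sent):
# compare_recommendations(titles_before, title_list)
```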
``` import os import math import torch from torch.autograd import Variable from torch.optim import Adam from torch import nn import torch.nn.functional as F from torchvision import transforms from torch.utils.data import DataLoader, random_split, Dataset from scipy.io import wavfile import scipy.signal import numpy as np import audio_transforms import matplotlib.pyplot as plt NUM_CLASSES = 3 BATCH_SIZE = 1 SONG_LENGTH_SECONDS = 10 class MusicDataset(Dataset): def __init__(self, directory, genres, downsample=None, noise=False): self.directory = directory self.files = [] self.downsample = downsample self.noise = noise for label, genre in enumerate(genres): genre_path = os.path.join(directory, genre) self.files.extend([(os.path.join(genre_path, f), label) for f in os.listdir(genre_path)]) def __getitem__(self, index): song, label = self.files[index] rate, data = wavfile.read(f'{self.directory}/{song}') data = data[:44100*SONG_LENGTH_SECONDS] if self.downsample: data = scipy.signal.resample(data, self.downsample * SONG_LENGTH_SECONDS) if self.noise: gauss = np.random.normal(0.01, 0.001, (len(data),)) data = data + gauss # todo: do we need this? #tensor = torch.Tensor(data)# / (2**15) tensor = torch.Tensor(data) / 1 << 31 tensor.unsqueeze_(0) tensor = audio_transforms.MEL2(44100)(tensor) return tensor, torch.tensor(label, dtype=torch.long) def input_size(self): return len(self[0][0][0]) * len(self[0][0][0][0]) def __len__(self): return len(self.files) def load_dataset(downsample=None, noise=False): d = MusicDataset('.', ['rock', 'electro', 'classic'], downsample=downsample, noise=noise) train, validate = random_split(d, [900, 300]) loader = DataLoader(train, batch_size=BATCH_SIZE) validation_loader = DataLoader(validate, batch_size=BATCH_SIZE) return d.input_size(), loader, validation_loader class Model1Linear(nn.Module): def __init__(self, input_size, hidden_size): super().__init__() self.h1 = nn.Linear(input_size, hidden_size) self.h2 = nn.Linear(hidden_size, hidden_size) self.h3 = nn.Linear(hidden_size, hidden_size) self.h4 = nn.Linear(hidden_size, hidden_size) self.h5 = nn.Linear(hidden_size, hidden_size) self.h6 = nn.Linear(hidden_size, hidden_size) self.h7 = nn.Linear(hidden_size, hidden_size) self.h8 = nn.Linear(hidden_size, hidden_size) self.h9 = nn.Linear(hidden_size, NUM_CLASSES) def forward(self, x): x = x.data.view(-1, input_size) x = self.h1(x) x = F.relu(x) x = F.dropout(x, training=self.training) x = self.h2(x) x = F.relu(x) x = F.dropout(x, training=self.training) x = self.h3(x) x = F.relu(x) x = F.dropout(x, training=self.training) x = self.h4(x) x = F.relu(x) x = F.dropout(x, training=self.training) x = self.h5(x) x = F.relu(x) x = F.dropout(x, training=self.training) x = self.h6(x) x = F.relu(x) x = F.dropout(x, training=self.training) x = self.h7(x) x = F.relu(x) x = F.dropout(x, training=self.training) x = self.h8(x) x = F.relu(x) x = F.dropout(x, training=self.training) x = self.h9(x) x = F.softmax(x, dim=1) return x from datetime import datetime def evalulate(model, validation_loader): model.eval() loss = 0.0 for data, labels in validation_loader: predictions_per_class = model(data.cuda()) _, highest_prediction_class = predictions_per_class.max(1) loss += F.nll_loss(predictions_per_class, labels.cuda()) return loss/len(validation_loader) def learn(model, loader, validation_loader, epochs=30, learning_rate=0.001): torch.cuda.empty_cache() optimizer = Adam(params=model.parameters(), lr=learning_rate) f = open(f'{datetime.now().isoformat()}.txt', 'w', buffering=1) for epoch 
in range(epochs): model.train() total_loss = 0.0 for data, labels in loader: predictions_per_class = model(data.cuda()) highest_prediction, highest_prediction_class = predictions_per_class.max(1) # how good are we? compare output with the target classes loss = F.nll_loss(predictions_per_class, labels.cuda()) total_loss += loss.item() model.zero_grad() loss.backward() optimizer.step() train_loss = total_loss/len(loader) validation_loss = evalulate(model, validation_loader) stats = f'Epoch: {epoch}, Train Loss: {train_loss}, Validation Loss: {validation_loss.item()}' print(stats) f.write(f'{stats}\n') return model input_size, loader, validation_loader = load_dataset() model = Model1Linear(input_size, 500).cuda() learn(model, loader, validation_loader, 10000, learning_rate=0.0001) ```
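A small evaluation sketch (assuming `model` and `validation_loader` from above, and a CUDA device as in the rest of the notebook) that reports plain classification accuracy alongside the validation loss.
```
import torch

def accuracy(model, loader):
    """Fraction of correctly classified clips in the given loader."""
    model.eval()
    correct = total = 0
    with torch.no_grad():
        for data, labels in loader:
            preds = model(data.cuda()).argmax(dim=1)
            correct += (preds == labels.cuda()).sum().item()
            total += labels.size(0)
    return correct / total

print(f"validation accuracy: {accuracy(model, validation_loader):.3f}")
```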
## Import the Libraries ``` import os import warnings warnings.filterwarnings('ignore') # importing packages import pandas as pd import re import numpy as np import seaborn as sns import matplotlib.pyplot as plt %matplotlib inline # sklearn packages from sklearn import metrics from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score, StratifiedKFold, RandomizedSearchCV from sklearn.pipeline import Pipeline from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier from lightgbm import LGBMClassifier from sklearn.metrics import roc_auc_score, roc_curve from sklearn.model_selection import KFold, StratifiedKFold import gc from sklearn.model_selection import StratifiedKFold plt.style.use("seaborn") %matplotlib inline plt.rcParams['figure.figsize'] = (10,8) ``` # Loading the data ``` #load the train and test data totaldf_onehot = pd.read_csv("totaldata_onehot.csv") #load the train data totaldf_onehot.head() #split the data into train and test traindf_cleaned = totaldf_onehot[totaldf_onehot["source"] == "train"].drop("source", axis = 1) testdf_cleaned = totaldf_onehot[totaldf_onehot["source"] == "test"].drop(["source", "m13"], axis = 1) traindf_cleaned.head() testdf_cleaned.head() submission_df = pd.read_csv("data/sample_submission.csv") submission_df.head() def kfold_lightgbm(train_df, test_df, submission_df,num_folds=3, stratified = True): dt_preds = {} print("Starting LightGBM. Train shape: {}, test shape: {}".format(train_df.shape, test_df.shape)) # Cross validation model if stratified: folds = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=1001) else: folds = KFold(n_splits= num_folds, shuffle=True, random_state=1001) # Create arrays and dataframes to store results oof_preds = np.zeros(train_df.shape[0]) sub_preds = np.zeros(test_df.shape[0]) feature_importance_df = pd.DataFrame() feats = [f for f in train_df.columns if f not in ["m13"]] print(feats) for n_fold, (train_idx, valid_idx) in enumerate(folds.split(train_df, train_df['m13'])): train_x, train_y = train_df[feats].iloc[train_idx], train_df['m13'].iloc[train_idx] valid_x, valid_y = train_df[feats].iloc[valid_idx], train_df['m13'].iloc[valid_idx] # LightGBM parameters found by Bayesian optimization clf = LGBMClassifier( nthread=4, n_estimators=10000, learning_rate=0.02, num_leaves=34, colsample_bytree=0.9497036, subsample=0.8715623, max_depth=8, reg_alpha=0.041545473, reg_lambda=0.0735294, min_split_gain=0.0222415, min_child_weight=39.3259775, silent=-1, verbose=-1, ) clf.fit(train_x, train_y, eval_set=[(train_x, train_y), (valid_x, valid_y)], eval_metric= 'f1', verbose= 200, early_stopping_rounds= 200) oof_preds[valid_idx] = clf.predict_proba(valid_x, num_iteration=clf.best_iteration_)[:, 1] sub_preds += clf.predict_proba(test_df[feats], num_iteration=clf.best_iteration_)[:, 1] / folds.n_splits dt_preds[n_fold + 1] = clf.predict(valid_x) fold_importance_df = pd.DataFrame() fold_importance_df["feature"] = feats fold_importance_df["importance"] = clf.feature_importances_ fold_importance_df["fold"] = n_fold + 1 feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0) print('Fold %2d AUC : %.6f' % (n_fold + 1, roc_auc_score(valid_y, oof_preds[valid_idx]))) print('Fold %2d F1 : %.6f' % (n_fold + 1, metrics.f1_score(valid_y, dt_preds[n_fold + 1]))) del clf, train_x, train_y, valid_x, valid_y gc.collect() print('Full AUC score %.6f' % roc_auc_score(train_df['m13'], oof_preds)) # Write submission file and 
plot feature importance display_importances(feature_importance_df) return feature_importance_df, dt_preds, sub_preds # Display/plot feature importance def display_importances(feature_importance_df_): cols = feature_importance_df_[["feature", "importance"]].groupby("feature").mean().sort_values(by="importance", ascending=False)[:40].index best_features = feature_importance_df_.loc[feature_importance_df_.feature.isin(cols)] plt.figure(figsize=(8, 10)) sns.barplot(x="importance", y="feature", data=best_features.sort_values(by="importance", ascending=False)) plt.title('LightGBM Features (avg over folds)') plt.tight_layout() plt.show() #plt.savefig('lgbm_importances01.png') feature_df, preds, test_probs = kfold_lightgbm(traindf_cleaned, testdf_cleaned, submission_df, 3, True) # map the averaged test-set probabilities to 0/1 predictions with a 0.2 threshold pd.Series(test_probs).map( lambda x: 1 if x >= 0.2 else 0 ).value_counts() # class balance of the out-of-fold validation predictions across the folds pd.Series(np.concatenate(list(preds.values()))).value_counts() ```
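A follow-up sketch for choosing the 0/1 cut-off more systematically than the fixed 0.2 above: sweep candidate thresholds over out-of-fold probabilities and keep the one with the best F1. It assumes you also return the `oof_preds` array that `kfold_lightgbm()` already builds internally; `best_threshold` and the commented-out variables are names introduced here for illustration.
```
import numpy as np
from sklearn.metrics import f1_score

def best_threshold(y_true, y_prob, candidates=np.arange(0.05, 0.95, 0.01)):
    """Return the (threshold, F1) pair with the highest F1 on the given probabilities."""
    scores = [(t, f1_score(y_true, (y_prob >= t).astype(int))) for t in candidates]
    return max(scores, key=lambda pair: pair[1])

# Hypothetical usage, assuming oof_preds is returned in the same row order as traindf_cleaned:
# thresh, f1 = best_threshold(traindf_cleaned['m13'].values, oof_preds)
# print(f"best threshold {thresh:.2f} with F1 = {f1:.3f}")
```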
## The next step in the gap analysis is to calculate the Turbine Ideal Energy (TIE) for the wind farm based on SCADA data ``` %load_ext autoreload %autoreload 2 ``` This notebook provides an overview and walk-through of the turbine ideal energy (TIE) method in OpenOA. The TIE metric is defined as the amount of electricity generated by all turbines at a wind farm operating under normal conditions (i.e., not subject to downtime or significant underperformance, but subject to wake losses and moderate turbine performance losses). The approach to calculate TIE is to: 1. Filter out underperforming data from the power curve for each turbine, 2. Develop a statistical relationship between the remaining power data and key atmospheric variables from a long-term reanalysis product 3. Long-term correct the period of record power data using the above statistical relationship 4. Sum up the long-term corrected power data across all turbines to get TIE for the wind farm Here we use different reanalysis products to capture the uncertainty around the modeled wind resource. We also consider uncertainty due to power data accuracy and the power curve filtering choices for identifying normal turbine performance made by the analyst. In this example, the process for estimating TIE is illustrated both with and without uncertainty quantification. ``` # Import required packages import matplotlib.pyplot as plt import numpy as np import pandas as pd from project_ENGIE import Project_Engie from operational_analysis.methods import turbine_long_term_gross_energy ``` In the call below, make sure the appropriate path to the CSV input files is specfied. In this example, the CSV files are located directly in the 'examples/data/la_haute_borne' folder ``` # Load plant object project = Project_Engie('./data/la_haute_borne/') # Load and prepare the wind farm data project.prepare() # Let's take a look at the columns in the SCADA data frame project._scada.df.columns ``` ### TIE calculation without uncertainty quantification Next we create a TIE object which will contain the analysis to be performed. The method has the ability to calculate uncertainty in the TIE metric through a Monte Carlo sampling of filtering thresholds, power data, and reanalysis product choices. For now, we turn this option off and run the method a single time. ``` ta = turbine_long_term_gross_energy.TurbineLongTermGrossEnergy(project) ``` All of the steps in the TI calculation process are pulled under a single run() function. These steps include: 1. Processing reanalysis data to daily averages. 2. Filtering the SCADA data 3. Fitting the daily reanalysis data to daily SCADA data using a Generalized Additive Model (GAM) 4. Apply GAM results to calculate long-term TIE for the wind farm By setting UQ = False (the default argument value), we must manually specify key filtering thresholds that would otherwise be sampled from a range of values through Monte Carlo. Specifically, we must set thresholds applied to the bin_filter() function in the toolkits.filtering class of OpenOA. ``` # Specify filter threshold values to be used wind_bin_thresh = 2.0 # Exclude data outside 2 m/s of the median for each power bin max_power_filter = 0.90 # Don't apply bin filter above 0.9 of turbine capacity ``` We also must decide how to deal with missing data when computing daily sums of energy production from each turbine. Here we set the threshold at 0.9 (i.e., if greater than 90% of SCADA data are available for a given day, scale up the daily energy by the fraction of data missing. 
If less than 90% data recovery, exclude that day from analysis. ``` # Set the correction threshold to 90% correction_threshold = 0.90 ``` Now we'll call the run() method to calculate TIE, choosing two reanalysis products to be used in the TIE calculation process. ``` # We can choose to save key plots to a file by setting enable_plotting = True and # specifying a directory to save the images. For now we turn off this feature. ta.run(reanal_subset = ['era5', 'merra2'], enable_plotting = False, plot_dir = None, wind_bin_thresh = wind_bin_thresh, max_power_filter = max_power_filter, correction_threshold = correction_threshold) ``` Now that we've finished the TIE calculation, let's examine results ``` ta._plant_gross # What is the long-term annual TIE for whole plant print('Long-term turbine ideal energy is %s GWh/year' %np.round(np.mean(ta._plant_gross/1e6),1)) ``` The long-term TIE value of 13.7 GWh/year is based on the mean TIE resulting from the two reanalysis products considered. Next, we can examine how well the filtering worked by examining the power curves for each turbine using the plot_filtered_power_curves() function. ``` # Currently saving figures in examples folder. The folder where figures are saved can be changed if desired. ta.plot_filtered_power_curves(save_folder = "./", output_to_terminal = True) ``` Overall these are very clean power curves, and the filtering algorithms seem to have done a good job of catching the most egregious outliers. Now let's look at the daily data and how well the power curve fit worked ``` # Currently saving figures in examples folder. The folder where figures are saved can be changed if desired. ta.plot_daily_fitting_result(save_folder = "./", output_to_terminal = True) ``` Overall the fit looks good. The modeled data sometimes estimate higher energy at low wind speeds compared to the observed, but keep in mind the model fits to long term wind speed, wind direction, and air density, whereas we are only showing the relationship to wind speed here. Note that 'imputed' means daily power data that were missing for a specific turbine, but were calculated by establishing statistical relationships with that turbine and its neighbors. This is necessary since a wind farm often has one turbine down and, without imputation, very little daily data would be left if we excluded days when a turbine was down. ### TIE calculation including uncertainty quantification Now we will create a TIE object for calculating TIE and quantifying the uncertainty in our estimate. The method estimates uncertainty in the TIE metric through a Monte Carlo sampling of filtering thresholds, power data, and reanalysis product choices. Note that we set the number of Monte Carlo simulations to only 100 in this example because of the relatively high computational effort required to perform a single iteration. In practice, a larger number of simulations is recommended (the default value is 2000). ``` ta = turbine_long_term_gross_energy.TurbineLongTermGrossEnergy(project, UQ = True, # enable uncertainty quantification num_sim = 100 # number of Monte Carlo simulations to perform ) ``` With uncertainty quantification enabled (UQ = True), we can specify the assumed uncertainty of the SCADA power data (0.5% by default) and ranges of two key filtering thresholds from which the Monte Carlo simulations will sample. Specifically, these thresholds are applied to the bin_filter() function in the toolkits.filtering class of OpenOA. 
Note that the following parameters are the default values used in the run() method. ``` uncertainty_scada=0.005 # Assumed uncertainty of SCADA power data (0.5%) # Range of filter threshold values to be used by Monte Carlo simulations # Data outside of a range of wind speeds from 1 to 3 m/s of the median for each power bin are considered wind_bin_thresh=(1, 3) # The bin filter will be applied up to fractions of turbine capacity from 80% to 90% max_power_filter=(0.8, 0.9) ``` We will consider a range of availability thresholds for dealing with missing data when computing daily sums of energy production from each turbine (i.e., if greater than the given threshold of SCADA data are available for a given day, scale up the daily energy by the fraction of data missing. If less than the given threshold of data are available, exclude that day from analysis. Here we set the range of thresholds as 85% to 95%. ``` correction_threshold=(0.85, 0.95) ``` Now we'll call the run() method to calculate TIE with uncertainty quantification, again choosing two reanalysis products to be used in the TIE calculation process. Note that without uncertainty quantification (UQ = False), a separate TIE value is calculated for each reanalysis product specified. However, when UQ = True, the reanalysis product is treated as another Monte Carlo sampling parameter. Thus, the impact of different reanlysis products is considered to be part of the overall uncertainty in TIE. ``` # We can choose to save key plots to a file by setting enable_plotting = True and # specifying a directory to save the images. For now we turn off this feature. ta.run(reanal_subset = ['era5', 'merra2'], enable_plotting = False, plot_dir = None, uncertainty_scada = uncertainty_scada, wind_bin_thresh = wind_bin_thresh, max_power_filter = max_power_filter, correction_threshold = correction_threshold) ``` Now that we've finished the Monte Carlo TIE calculation simulations, let's examine results ``` np.mean(ta._plant_gross) np.std(ta._plant_gross) # Mean long-term annual TIE for whole plant print('Mean long-term turbine ideal energy is %s GWh/year' %np.round(np.mean(ta._plant_gross/1e6),1)) # Uncertainty in long-term annual TIE for whole plant print('Uncertainty in long-term turbine ideal energy is %s GWh/year, or %s percent' % (np.round(np.std(ta._plant_gross/1e6),1), np.round(100*np.std(ta._plant_gross)/np.mean(ta._plant_gross),1))) ``` As expected, the mean long-term TIE is close to the earlier estimate without uncertainty quantification. A relatively low uncertainty has been estimated for the TIE calculations. This is a result of the relatively close agreement between the two reanalysis products and the clean power curves plotted earlier.
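If a percentile-based summary of the Monte Carlo results is preferred over the mean and standard deviation reported above, it can be computed directly from the simulated values. The short sketch below is an illustrative addition; it assumes `ta._plant_gross` is the one-dimensional array of simulated plant-level TIE values used in the print statements above, converted to GWh/year in the same way.

```
# Percentile-based summary of the Monte Carlo TIE distribution
# (assumes ta._plant_gross holds the simulated values used above)
tie_gwh = np.asarray(ta._plant_gross).ravel() / 1e6  # convert to GWh/year as above
p2_5, p50, p97_5 = np.percentile(tie_gwh, [2.5, 50, 97.5])
print('Median TIE: %.1f GWh/year (95%% interval: %.1f to %.1f GWh/year)'
      % (p50, p2_5, p97_5))
```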
# Code Review #1 Purpose: To introduce the group to looking at code analytically Created By: Hawley Helmbrecht Creation Date: 10-12-21 # Introduction to Analyzing Code All snipets within this section are taken from the Hitchhiker's Guide to Python (https://docs.python-guide.org/writing/style/) ### Example 1: Explicit Code ``` def make_complex(*args): x, y = args return dict(**locals()) def make_complex(x, y): return {'x': x, 'y': y} ``` ### Example 2: One Statement per Line ``` print('one'); print('two') if x == 1: print('one') if <complex comparison> and <other complex comparison>: # do something print('one') print('two') if x == 1: print('one') cond1 = <complex comparison> cond2 = <other complex comparison> if cond1 and cond2: # do something ``` ## Intro to Pep 8 Example 1: Limit all lines to a maximum of 79 characters. ``` #Wrong: income = (gross_wages + taxable_interest + (dividends - qualified_dividends) - ira_deduction - student_loan_interest) #Correct: income = (gross_wages + taxable_interest + (dividends - qualified_dividends) - ira_deduction - student_loan_interest) ``` Example 2: Line breaks around binary operators ``` # Wrong: # operators sit far away from their operands income = (gross_wages + taxable_interest + (dividends - qualified_dividends) - ira_deduction - student_loan_interest) # Correct: # easy to match operators with operands income = (gross_wages + taxable_interest + (dividends - qualified_dividends) - ira_deduction - student_loan_interest) ``` Example 3: Import formatting ``` # Correct: import os import sys # Wrong: import sys, os ``` ## Let's look at some code! Sci-kit images Otsu Threshold code! (https://github.com/scikit-image/scikit-image/blob/main/skimage/filters/thresholding.py) ``` def threshold_otsu(image=None, nbins=256, *, hist=None): """Return threshold value based on Otsu's method. Either image or hist must be provided. If hist is provided, the actual histogram of the image is ignored. Parameters ---------- image : (N, M[, ..., P]) ndarray, optional Grayscale input image. nbins : int, optional Number of bins used to calculate histogram. This value is ignored for integer arrays. hist : array, or 2-tuple of arrays, optional Histogram from which to determine the threshold, and optionally a corresponding array of bin center intensities. If no hist provided, this function will compute it from the image. Returns ------- threshold : float Upper threshold value. All pixels with an intensity higher than this value are assumed to be foreground. References ---------- .. [1] Wikipedia, https://en.wikipedia.org/wiki/Otsu's_Method Examples -------- >>> from skimage.data import camera >>> image = camera() >>> thresh = threshold_otsu(image) >>> binary = image <= thresh Notes ----- The input image must be grayscale. 
""" if image is not None and image.ndim > 2 and image.shape[-1] in (3, 4): warn(f'threshold_otsu is expected to work correctly only for ' f'grayscale images; image shape {image.shape} looks like ' f'that of an RGB image.') # Check if the image has more than one intensity value; if not, return that # value if image is not None: first_pixel = image.ravel()[0] if np.all(image == first_pixel): return first_pixel counts, bin_centers = _validate_image_histogram(image, hist, nbins) # class probabilities for all possible thresholds weight1 = np.cumsum(counts) weight2 = np.cumsum(counts[::-1])[::-1] # class means for all possible thresholds mean1 = np.cumsum(counts * bin_centers) / weight1 mean2 = (np.cumsum((counts * bin_centers)[::-1]) / weight2[::-1])[::-1] # Clip ends to align class 1 and class 2 variables: # The last value of ``weight1``/``mean1`` should pair with zero values in # ``weight2``/``mean2``, which do not exist. variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2 idx = np.argmax(variance12) threshold = bin_centers[idx] return threshold ``` What do you observe about the code that makes it pythonic? ``` Do the pythonic conventions make it easier to understand? ``` How is the documentation on this function?
___ <img src='logo.png' /></a> ___ # Python Crash Course Exercises - Solutions ## Exercises Answer the questions or complete the tasks outlined in bold below, use the specific method described if applicable. ** What is 7 to the power of 4?** ``` 7**4 ``` ** Split this string:** s = "Hi there Sam!" **into a list. ** ``` s = 'Hi there Sam!' s.split() ``` ** Given the variables:** planet = "Earth" diameter = 12742 ** Use .format() to print the following string: ** The diameter of Earth is 12742 kilometers. ``` planet = "Earth" diameter = 12742 print("The diameter of {} is {} kilometers.".format(planet,diameter)) ``` ** Given this nested list, use indexing to grab the word "hello" ** ``` lst = [1,2,[3,4],[5,[100,200,['hello']],23,11],1,7] lst[-3][1][2][0] ``` ** Given this nest dictionary grab the word "hello". Be prepared, this will be annoying/tricky ** ``` d = {'k1':[1,2,3,{'tricky':['oh','man','inception',{'target':[1,2,3,'hello']}]}]} d['k1'][3]['tricky'][3]['target'][3] ``` ** What is the main difference between a tuple and a list? ** ``` # Tuple is immutable ``` ** Create a function that grabs the email website domain from a string in the form: ** [email protected] **So for example, passing "[email protected]" would return: domain.com** ``` def domainGet(email): return email.split('@')[-1] domainGet('[email protected]') ``` ** Create a basic function that returns True if the word 'dog' is contained in the input string. Don't worry about edge cases like a punctuation being attached to the word dog, but do account for capitalization. ** ``` def findDog(st): return 'dog' in st.lower().split() findDog('Is there a dog here?') ``` ** Create a function that counts the number of times the word "dog" occurs in a string. Again ignore edge cases. ** ``` def countDog(st): count = 0 for word in st.lower().split(): if word == 'dog': count += 1 return count countDog('This dog runs faster than the other dog dude!') ``` ** Use lambda expressions and the filter() function to filter out words from a list that don't start with the letter 's'. For example:** seq = ['soup','dog','salad','cat','great'] **should be filtered down to:** ['soup','salad'] ``` seq = ['soup','dog','salad','cat','great'] list(filter(lambda word: word[0]=='s',seq)) ``` ### Final Problem **You are driving a little too fast, and a police officer stops you. Write a function to return one of 3 possible results: "No ticket", "Small ticket", or "Big Ticket". If your speed is 60 or less, the result is "No Ticket". If speed is between 61 and 80 inclusive, the result is "Small Ticket". If speed is 81 or more, the result is "Big Ticket". Unless it is your birthday (encoded as a boolean value in the parameters of the function) -- on your birthday, your speed can be 5 higher in all cases. ** ``` def caught_speeding(speed, is_birthday): if is_birthday: speeding = speed - 5 else: speeding = speed if speeding > 80: return 'Big Ticket' elif speeding > 60: return 'Small Ticket' else: return 'No Ticket' caught_speeding(81,True) caught_speeding(81,False) ``` # Great job!
# TV Script Generation In this project, you'll generate your own [Seinfeld](https://en.wikipedia.org/wiki/Seinfeld) TV scripts using RNNs. You'll be using part of the [Seinfeld dataset](https://www.kaggle.com/thec03u5/seinfeld-chronicles#scripts.csv) of scripts from 9 seasons. The Neural Network you'll build will generate a new ,"fake" TV script, based on patterns it recognizes in this training data. ## Get the Data The data is already provided for you in `./data/Seinfeld_Scripts.txt` and you're encouraged to open that file and look at the text. >* As a first step, we'll load in this data and look at some samples. * Then, you'll be tasked with defining and training an RNN to generate a new script! ``` """ DON'T MODIFY ANYTHING IN THIS CELL """ # load in data import helper data_dir = './data/Seinfeld_Scripts.txt' text = helper.load_data(data_dir) ``` ## Explore the Data Play around with `view_line_range` to view different parts of the data. This will give you a sense of the data you'll be working with. You can see, for example, that it is all lowercase text, and each new line of dialogue is separated by a newline character `\n`. ``` view_line_range = (2, 12) """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ import numpy as np print('Dataset Stats') print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()}))) lines = text.split('\n') print('Number of lines: {}'.format(len(lines))) word_count_line = [len(line.split()) for line in lines] print('Average number of words in each line: {}'.format(np.average(word_count_line))) print() print('The lines {} to {}:'.format(*view_line_range)) print('\n'.join(text.split('\n')[view_line_range[0]:view_line_range[1]])) ``` --- ## Implement Pre-processing Functions The first thing to do to any dataset is pre-processing. Implement the following pre-processing functions below: - Lookup Table - Tokenize Punctuation ### Lookup Table To create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries: - Dictionary to go from the words to an id, we'll call `vocab_to_int` - Dictionary to go from the id to word, we'll call `int_to_vocab` Return these dictionaries in the following **tuple** `(vocab_to_int, int_to_vocab)` ``` import problem_unittests as tests from collections import Counter def create_lookup_tables(text): """ Create lookup tables for vocabulary :param text: The text of tv scripts split into words :return: A tuple of dicts (vocab_to_int, int_to_vocab) """ # TODO: Implement Function #reference source: inspired/copied from course samples word_counts = Counter(text) sorted_vocab = sorted(word_counts, key=word_counts.get, reverse=True) int_to_vocab = {ii: word for ii, word in enumerate(sorted_vocab)} vocab_to_int = {word: ii for ii, word in int_to_vocab.items()} # return tuple return vocab_to_int, int_to_vocab """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_create_lookup_tables(create_lookup_tables) ``` ### Tokenize Punctuation We'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks can create multiple ids for the same word. For example, "bye" and "bye!" would generate two different word ids. Implement the function `token_lookup` to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". 
Create a dictionary for the following symbols where the symbol is the key and value is the token: - Period ( **.** ) - Comma ( **,** ) - Quotation Mark ( **"** ) - Semicolon ( **;** ) - Exclamation mark ( **!** ) - Question mark ( **?** ) - Left Parentheses ( **(** ) - Right Parentheses ( **)** ) - Dash ( **-** ) - Return ( **\n** ) This dictionary will be used to tokenize the symbols and add the delimiter (space) around it. This separates each symbols as its own word, making it easier for the neural network to predict the next word. Make sure you don't use a value that could be confused as a word; for example, instead of using the value "dash", try using something like "||dash||". ``` def token_lookup(): """ Generate a dict to turn punctuation into a token. :return: Tokenized dictionary where the key is the punctuation and the value is the token """ # TODO: Implement Function retval = { ".": "||Period||", ",": "||Comma||", "\"": "||QuotationMark||", ";": "||Semicolon||", "!": "||ExclamationMark||", "?": "||QuestionMark||", "(": "||LeftParentheses||", ")": "||RightParentheses||", "-": "||Dash||", "\n": "||Return||", } return retval """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_tokenize(token_lookup) ``` ## Pre-process all the data and save it Running the code cell below will pre-process all the data and save it to file. You're encouraged to lok at the code for `preprocess_and_save_data` in the `helpers.py` file to see what it's doing in detail, but you do not need to change this code. ``` """ DON'T MODIFY ANYTHING IN THIS CELL """ # pre-process training data helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables) ``` # Check Point This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk. ``` """ DON'T MODIFY ANYTHING IN THIS CELL """ import helper import problem_unittests as tests int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess() len(int_text) ``` ## Build the Neural Network In this section, you'll build the components necessary to build an RNN by implementing the RNN Module and forward and backpropagation functions. ### Check Access to GPU ``` """ DON'T MODIFY ANYTHING IN THIS CELL """ import torch # Check for a GPU train_on_gpu = torch.cuda.is_available() if not train_on_gpu: print('No GPU found. Please use a GPU to train your neural network.') ``` ## Input Let's start with the preprocessed input data. We'll use [TensorDataset](http://pytorch.org/docs/master/data.html#torch.utils.data.TensorDataset) to provide a known format to our dataset; in combination with [DataLoader](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader), it will handle batching, shuffling, and other dataset iteration functions. You can create data with TensorDataset by passing in feature and target tensors. Then create a DataLoader as usual. ``` data = TensorDataset(feature_tensors, target_tensors) data_loader = torch.utils.data.DataLoader(data, batch_size=batch_size) ``` ### Batching Implement the `batch_data` function to batch `words` data into chunks of size `batch_size` using the `TensorDataset` and `DataLoader` classes. >You can batch words using the DataLoader, but it will be up to you to create `feature_tensors` and `target_tensors` of the correct size and content for a given `sequence_length`. 
For example, say we have these as input: ``` words = [1, 2, 3, 4, 5, 6, 7] sequence_length = 4 ``` Your first `feature_tensor` should contain the values: ``` [1, 2, 3, 4] ``` And the corresponding `target_tensor` should just be the next "word"/tokenized word value: ``` 5 ``` This should continue with the second `feature_tensor`, `target_tensor` being: ``` [2, 3, 4, 5] # features 6 # target ``` ``` from torch.utils.data import TensorDataset, DataLoader nb_samples = 6 features = torch.randn(nb_samples, 10) labels = torch.empty(nb_samples, dtype=torch.long).random_(10) dataset = TensorDataset(features, labels) loader = DataLoader( dataset, batch_size=2 ) for batch_idx, (x, y) in enumerate(loader): print(x.shape, y.shape) print(features) from torch.utils.data import TensorDataset, DataLoader def batch_data(words, sequence_length, batch_size): """ Batch the neural network data using DataLoader :param words: The word ids of the TV scripts :param sequence_length: The sequence length of each batch :param batch_size: The size of each batch; the number of sequences in a batch :return: DataLoader with batched data """ # TODO: Implement function batch = len(words)//batch_size words = words[:batch*batch_size] feature_tensors, target_tensors = [], [] for ndx in range(len(words) - sequence_length): feature_tensors += [words[ndx:ndx+sequence_length]] target_tensors += [words[ndx+sequence_length]] feature_tensors = torch.LongTensor(feature_tensors) target_tensors = torch.LongTensor(target_tensors) data = TensorDataset(feature_tensors, target_tensors) data_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=True ) # return a dataloader return data_loader # there is no test for this function, but you are encouraged to create # print statements and tests of your own ``` ### Test your dataloader You'll have to modify this code to test a batching function, but it should look fairly similar. Below, we're generating some test text data and defining a dataloader using the function you defined, above. Then, we are getting some sample batch of inputs `sample_x` and targets `sample_y` from our dataloader. Your code should return something like the following (likely in a different order, if you shuffled your data): ``` torch.Size([10, 5]) tensor([[ 28, 29, 30, 31, 32], [ 21, 22, 23, 24, 25], [ 17, 18, 19, 20, 21], [ 34, 35, 36, 37, 38], [ 11, 12, 13, 14, 15], [ 23, 24, 25, 26, 27], [ 6, 7, 8, 9, 10], [ 38, 39, 40, 41, 42], [ 25, 26, 27, 28, 29], [ 7, 8, 9, 10, 11]]) torch.Size([10]) tensor([ 33, 26, 22, 39, 16, 28, 11, 43, 30, 12]) ``` ### Sizes Your sample_x should be of size `(batch_size, sequence_length)` or (10, 5) in this case and sample_y should just have one dimension: batch_size (10). ### Values You should also notice that the targets, sample_y, are the *next* value in the ordered test_text data. So, for an input sequence `[ 28, 29, 30, 31, 32]` that ends with the value `32`, the corresponding output should be `33`. ``` # test dataloader test_text = range(50) t_loader = batch_data(test_text, sequence_length=6, batch_size=10) data_iter = iter(t_loader) sample_x, sample_y = data_iter.next() print(sample_x.shape) print(sample_x) print(sample_y.shape) print(sample_y) ``` --- ## Build the Neural Network Implement an RNN using PyTorch's [Module class](http://pytorch.org/docs/master/nn.html#torch.nn.Module). You may choose to use a GRU or an LSTM. To complete the RNN, you'll have to implement the following functions for the class: - `__init__` - The initialize function. 
- `init_hidden` - The initialization function for an LSTM/GRU hidden state - `forward` - Forward propagation function. The initialize function should create the layers of the neural network and save them to the class. The forward propagation function will use these layers to run forward propagation and generate an output and a hidden state. **The output of this model should be the *last* batch of word scores** after a complete sequence has been processed. That is, for each input sequence of words, we only want to output the word scores for a single, most likely, next word. ### Hints 1. Make sure to stack the outputs of the lstm to pass to your fully-connected layer, you can do this with `lstm_output = lstm_output.contiguous().view(-1, self.hidden_dim)` 2. You can get the last batch of word scores by shaping the output of the final, fully-connected layer like so: ``` # reshape into (batch_size, seq_length, output_size) output = output.view(batch_size, -1, self.output_size) # get last batch out = output[:, -1] ``` ``` #reference source: inspired/copied from course samples import numpy as np def one_hot_encode(arr, n_labels): arr = arr.cpu().numpy() # Initialize the the encoded array one_hot = np.zeros((np.multiply(*arr.shape), n_labels), dtype=np.float32) # Fill the appropriate elements with ones one_hot[np.arange(one_hot.shape[0]), arr.flatten()] = 1. # Finally reshape it to get back to the original array one_hot = one_hot.reshape((*arr.shape, n_labels)) if(train_on_gpu): return torch.from_numpy(one_hot).cuda() else: return torch.from_numpy(one_hot) # check that the function works as expected test_seq = np.array([[3, 5, 1]]) test_seq = torch.from_numpy(test_seq) print(test_seq) one_hot = one_hot_encode(test_seq, 8) print(one_hot) import torch.nn as nn class RNN(nn.Module): def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, dropout=0.5): """ Initialize the PyTorch RNN Module :param vocab_size: The number of input dimensions of the neural network (the size of the vocabulary) :param output_size: The number of output dimensions of the neural network :param embedding_dim: The size of embeddings, should you choose to use them a :param hidden_dim: The size of the hidden layer outputs :param dropout: dropout to add in between LSTM/GRU layers """ super(RNN, self).__init__() # TODO: Implement function # set class variables self.input_dim = vocab_size self.hidden_dim = hidden_dim self.output_dim = output_size self.n_layers = n_layers self.dropout_prob = dropout self.embedding_dim = embedding_dim ## define model layers self.embed = nn.Embedding(vocab_size, embedding_dim) self.lstm = nn.LSTM(embedding_dim, self.hidden_dim, self.n_layers, dropout=self.dropout_prob, batch_first=True) self.dropout = nn.Dropout(dropout) #final fully connected self.fc = nn.Linear(self.hidden_dim, self.output_dim) def forward(self, nn_input, hidden): """ Forward propagation of the neural network :param nn_input: The input to the neural network :param hidden: The hidden state :return: Two Tensors, the output of the neural network and the latest hidden state """ # TODO: Implement function # ## outputs and the new hidden state # nn_input = one_hot_encode(nn_input, self.input_dim) embedding = self.embed(nn_input) lstm_output, hidden = self.lstm(embedding, hidden) # lstm_output, hidden = self.lstm(nn_input, hidden) #without embedding out = self.dropout(lstm_output) #stack the outputs of the lstm to pass to your fully-connected layer out = out.contiguous().view(-1, self.hidden_dim) out = self.fc(out) 
##From notes above #The output of this model should be the last batch of word scores after a complete sequence has been processed. #That is, for each input sequence of words, we only want to output the word scores for a single, most likely, next word. # reshape into (batch_size, seq_length, output_size) out = out.view(self.batch_size, -1, self.output_dim) # get last batch out = out[:, -1] # return one batch of output word scores and the hidden state return out, hidden def init_hidden(self, batch_size): ''' Initialize the hidden state of an LSTM/GRU :param batch_size: The batch_size of the hidden state :return: hidden state of dims (n_layers, batch_size, hidden_dim) ''' # Implement function self.batch_size = batch_size weight = next(self.parameters()).data # two new tensors with sizes n_layers x batch_size x n_hidden # initialize hidden state with zero weights, and move to GPU if available if (train_on_gpu): hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda(), weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda()) else: hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_(), weight.new(self.n_layers, batch_size, self.hidden_dim).zero_()) return hidden """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_rnn(RNN, train_on_gpu) ``` ### Define forward and backpropagation Use the RNN class you implemented to apply forward and back propagation. This function will be called, iteratively, in the training loop as follows: ``` loss = forward_back_prop(decoder, decoder_optimizer, criterion, inp, target) ``` And it should return the average loss over a batch and the hidden state returned by a call to `RNN(inp, hidden)`. Recall that you can get this loss by computing it, as usual, and calling `loss.item()`. **If a GPU is available, you should move your data to that GPU device, here.** ``` def forward_back_prop(rnn, optimizer, criterion, inp, target, hidden): """ Forward and backward propagation on the neural network :param decoder: The PyTorch Module that holds the neural network :param decoder_optimizer: The PyTorch optimizer for the neural network :param criterion: The PyTorch loss function :param inp: A batch of input to the neural network :param target: The target output for the batch of input :return: The loss and the latest hidden state Tensor """ # TODO: Implement Function #one hot encoding? #required for non embeded case only # zero accumulated gradients rnn.zero_grad() #To avoid retain_graph=True, inspired from course discussions hidden = (hidden[0].detach(), hidden[1].detach()) # move data to GPU, if available if(train_on_gpu): inp = inp.cuda() target = target.cuda() output, hidden = rnn(inp, hidden) loss = criterion(output, target) #target.view(batch_size*sequence_length) # perform backpropagation and optimization # loss.backward(retain_graph=True) #Removed due to high resource consumption loss.backward() ##did not get any advantage # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs. # nn.utils.clip_grad_norm_(rnn.parameters(), clip) ? optimizer.step() # return the loss over a batch and the hidden state produced by our model return loss.item(), hidden # Note that these tests aren't completely extensive. 
# they are here to act as general checks on the expected outputs of your functions """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_forward_back_prop(RNN, forward_back_prop, train_on_gpu) ``` ## Neural Network Training With the structure of the network complete and data ready to be fed in the neural network, it's time to train it. ### Train Loop The training loop is implemented for you in the `train_decoder` function. This function will train the network over all the batches for the number of epochs given. The model progress will be shown every number of batches. This number is set with the `show_every_n_batches` parameter. You'll set this parameter along with other parameters in the next section. ``` """ DON'T MODIFY ANYTHING IN THIS CELL """ def train_rnn(rnn, batch_size, optimizer, criterion, n_epochs, show_every_n_batches=100): batch_losses = [] rnn.train() print("Training for %d epoch(s), %d batch size, %d show every..." % (n_epochs, batch_size, show_every_n_batches)) for epoch_i in range(1, n_epochs + 1): # initialize hidden state hidden = rnn.init_hidden(batch_size) for batch_i, (inputs, labels) in enumerate(train_loader, 1): # make sure you iterate over completely full batches, only n_batches = len(train_loader.dataset)//batch_size if(batch_i > n_batches): break # forward, back prop loss, hidden = forward_back_prop(rnn, optimizer, criterion, inputs, labels, hidden) # record loss batch_losses.append(loss) # printing loss stats if batch_i % show_every_n_batches == 0: print('Epoch: {:>4}/{:<4} Loss: {}'.format( epoch_i, n_epochs, np.average(batch_losses))) batch_losses = [] # returns a trained rnn return rnn #modified version with detailed printing, global loss for loaded network (rnn), and saving network def train_rnn_copy(rnn, batch_size, optimizer, criterion, n_epochs, show_every_n_batches=100, myGlobalLoss=10): batch_losses = [] rnn.train() print("Training for %d epoch(s), %d batch size, show every %d, global loss %.4f..." % (n_epochs, batch_size, show_every_n_batches, myGlobalLoss)) for epoch_i in range(1, n_epochs + 1): # initialize hidden state hidden = rnn.init_hidden(batch_size) for batch_i, (inputs, labels) in enumerate(train_loader, 1): # make sure you iterate over completely full batches, only n_batches = len(train_loader.dataset)//batch_size if(batch_i > n_batches): break # forward, back prop loss, hidden = forward_back_prop(rnn, optimizer, criterion, inputs, labels, hidden) # record loss batch_losses.append(loss) # printing loss stats if batch_i % show_every_n_batches == 0: avgLoss = np.average(batch_losses) print('Epoch: {:>4}/{:<4} Batch: {:>4}/{:<4} Loss: {}'.format( epoch_i, n_epochs, batch_i, n_batches, np.average(batch_losses))) batch_losses = [] if(myGlobalLoss > avgLoss): print('Global Loss {} ---> {}. Saving...'.format(myGlobalLoss, avgLoss)) myGlobalLoss = avgLoss #saved at batch level for quick testing and restart #should be moved to epoch level to avoid saving semi-trained network helper.save_model('./save/trained_rnn_mid_we', rnn) # returns a trained rnn return rnn ``` ### Hyperparameters Set and train the neural network with the following parameters: - Set `sequence_length` to the length of a sequence. - Set `batch_size` to the batch size. - Set `num_epochs` to the number of epochs to train for. - Set `learning_rate` to the learning rate for an Adam optimizer. - Set `vocab_size` to the number of uniqe tokens in our vocabulary. - Set `output_size` to the desired size of the output. 
- Set `embedding_dim` to the embedding dimension; smaller than the vocab_size. - Set `hidden_dim` to the hidden dimension of your RNN. - Set `n_layers` to the number of layers/cells in your RNN. - Set `show_every_n_batches` to the number of batches at which the neural network should print progress. If the network isn't getting the desired results, tweak these parameters and/or the layers in the `RNN` class. ``` # Data params # Sequence Length, # of words in a sequence sequence_length = 10 # Batch Size if(train_on_gpu): batch_size = 512 #128 #64 else: batch_size = 5 # data loader - do not change train_loader = batch_data(int_text, sequence_length, batch_size) # Training parameters myGlobalLoss = 5 myDropout = 0.5 #0.8 # Number of Epochs num_epochs = 10 #5 #50 # Learning Rate learning_rate = 0.001 #0.002 #0.005 #0.001 # Model parameters # Vocab size vocab_size = len(vocab_to_int)+1 # Output size output_size = vocab_size # Embedding Dimension embedding_dim = 300 #256 #200 # Hidden Dimension, Usually larger is better performance wise. Common values are 128, 256, 512, hidden_dim = 512 #256 # Number of RNN Layers, Typically between 1-3 n_layers = 2 # Show stats for every n number of batches if(train_on_gpu): show_every_n_batches = 200 else: show_every_n_batches = 1 ``` ### Train In the next cell, you'll train the neural network on the pre-processed data. If you have a hard time getting a good loss, you may consider changing your hyperparameters. In general, you may get better results with larger hidden and n_layer dimensions, but larger models take a longer time to train. > **You should aim for a loss less than 3.5.** You should also experiment with different sequence lengths, which determine the size of the long range dependencies that a model can learn. ``` #for debugging purposes # import os # os.environ['CUDA_LAUNCH_BLOCKING'] = "1" """ DON'T MODIFY ANYTHING IN THIS CELL """ # create model and move to gpu if available rnn = RNN(vocab_size, output_size, embedding_dim, hidden_dim, n_layers, dropout=myDropout) if train_on_gpu: rnn.cuda() # defining loss and optimization functions for training optimizer = torch.optim.Adam(rnn.parameters(), lr=learning_rate) criterion = nn.CrossEntropyLoss() try: rnn = helper.load_model('./save/trained_rnn_mid_we') print("loaded mid save model") except: try: rnn = helper.load_model('./save/trained_rnn') print("failed mid save.. loaded global model") except: print("could not load any model") finally: print(rnn) # training the model trained_rnn = train_rnn_copy(rnn, batch_size, optimizer, criterion, num_epochs, show_every_n_batches, myGlobalLoss) # saving the trained model helper.save_model('./save/trained_rnn', trained_rnn) print('Model Trained and Saved') ``` ### Question: How did you decide on your model hyperparameters? For example, did you try different sequence_lengths and find that one size made the model converge faster? What about your hidden_dim and n_layers; how did you decide on those? **Answer:** (Write answer, here) - Tried with multiple combinations of hyperparameters to get optimum results. - sequence_length: Tried different sequence lengths between 5-30. Higher sequence lengths took more time to train. Therefore, used 10 which gave satisfactory results. - batch size: Higher batch size resulted in better results. Due to GPU memory limitations used 512 with embedding. When tried without embedding, the maximum size (again due to memory limitation) was 128 - embedding layer: To begin with, for experimentation purposes, did not use embedding. 
Later, when the embedding was used memory and time seedup were recorded. - learning rate: Tried different leanring rates. During initial investigations, higher learning rates ~0.01 did not converge well to a satisfactory solution. Also, tried decreaing learning rate (manually) after a few epoches to see marginal improvements. Then tried between 0.001 to 0.0005. 0.001 gave the best results. Therefore, used the same. - hidden dim: Increasing hidden dim decreased loss. But, due to memory limitations used 512 - n_layers: A value between 1-3 is recommended. 2 was a good choice and gave good results. --- # Checkpoint After running the above training cell, your model will be saved by name, `trained_rnn`, and if you save your notebook progress, **you can pause here and come back to this code at another time**. You can resume your progress by running the next cell, which will load in our word:id dictionaries _and_ load in your saved model by name! ``` """ DON'T MODIFY ANYTHING IN THIS CELL """ import torch import helper import problem_unittests as tests _, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess() trained_rnn = helper.load_model('./save/trained_rnn') ``` ## Generate TV Script With the network trained and saved, you'll use it to generate a new, "fake" Seinfeld TV script in this section. ### Generate Text To generate the text, the network needs to start with a single word and repeat its predictions until it reaches a set length. You'll be using the `generate` function to do this. It takes a word id to start with, `prime_id`, and generates a set length of text, `predict_len`. Also note that it uses topk sampling to introduce some randomness in choosing the most likely next word, given an output set of word scores! ``` """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ import torch.nn.functional as F def generate(rnn, prime_id, int_to_vocab, token_dict, pad_value, predict_len=100): """ Generate text using the neural network :param decoder: The PyTorch Module that holds the trained neural network :param prime_id: The word id to start the first prediction :param int_to_vocab: Dict of word id keys to word values :param token_dict: Dict of puncuation tokens keys to puncuation values :param pad_value: The value used to pad a sequence :param predict_len: The length of text to generate :return: The generated text """ rnn.eval() # create a sequence (batch_size=1) with the prime_id current_seq = np.full((1, sequence_length), pad_value) current_seq[-1][-1] = prime_id predicted = [int_to_vocab[prime_id]] for _ in range(predict_len): if train_on_gpu: current_seq = torch.LongTensor(current_seq).cuda() else: current_seq = torch.LongTensor(current_seq) # initialize the hidden state hidden = rnn.init_hidden(current_seq.size(0)) # get the output of the rnn output, _ = rnn(current_seq, hidden) # get the next word probabilities p = F.softmax(output, dim=1).data if(train_on_gpu): p = p.cpu() # move to cpu # use top_k sampling to get the index of the next word top_k = 5 p, top_i = p.topk(top_k) top_i = top_i.numpy().squeeze() # select the likely next word index with some element of randomness p = p.numpy().squeeze() word_i = np.random.choice(top_i, p=p/p.sum()) # retrieve that word from the dictionary word = int_to_vocab[word_i] predicted.append(word) # the generated word becomes the next "current sequence" and the cycle can continue current_seq = np.roll(current_seq, -1, 1) current_seq[-1][-1] = word_i gen_sentences = ' '.join(predicted) # Replace punctuation tokens for key, token 
in token_dict.items(): ending = ' ' if key in ['\n', '(', '"'] else '' gen_sentences = gen_sentences.replace(' ' + token.lower(), key) gen_sentences = gen_sentences.replace('\n ', '\n') gen_sentences = gen_sentences.replace('( ', '(') # return all the sentences return gen_sentences ``` ### Generate a New Script It's time to generate the text. Set `gen_length` to the length of TV script you want to generate and set `prime_word` to one of the following to start the prediction: - "jerry" - "elaine" - "george" - "kramer" You can set the prime word to _any word_ in our dictionary, but it's best to start with a name for generating a TV script. (You can also start with any other names you find in the original text file!) ``` # run the cell multiple times to get different results! gen_length = 400 # modify the length to your preference prime_word = 'jerry' # name for starting the script """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ pad_word = helper.SPECIAL_WORDS['PADDING'] generated_script = generate(trained_rnn, vocab_to_int[prime_word + ':'], int_to_vocab, token_dict, vocab_to_int[pad_word], gen_length) print(generated_script) ``` #### Save your favorite scripts Once you have a script that you like (or find interesting), save it to a text file! ``` # save script to a text file f = open("generated_script_1.txt","w") f.write(generated_script) f.close() ``` # The TV Script is Not Perfect It's ok if the TV script doesn't make perfect sense. It should look like alternating lines of dialogue, here is one such example of a few generated lines. ### Example generated script >jerry: what about me? > >jerry: i don't have to wait. > >kramer:(to the sales table) > >elaine:(to jerry) hey, look at this, i'm a good doctor. > >newman:(to elaine) you think i have no idea of this... > >elaine: oh, you better take the phone, and he was a little nervous. > >kramer:(to the phone) hey, hey, jerry, i don't want to be a little bit.(to kramer and jerry) you can't. > >jerry: oh, yeah. i don't even know, i know. > >jerry:(to the phone) oh, i know. > >kramer:(laughing) you know...(to jerry) you don't know. You can see that there are multiple characters that say (somewhat) complete sentences, but it doesn't have to be perfect! It takes quite a while to get good results, and often, you'll have to use a smaller vocabulary (and discard uncommon words), or get more data. The Seinfeld dataset is about 3.4 MB, which is big enough for our purposes; for script generation you'll want more than 1 MB of text, generally. # Submitting This Project When submitting this project, make sure to run all the cells before saving the notebook. Save the notebook file as "dlnd_tv_script_generation.ipynb" and save another copy as an HTML file by clicking "File" -> "Download as.."->"html". Include the "helper.py" and "problem_unittests.py" files in your submission. Once you download these files, compress them into one zip file for submission.
**Recursion and Higher Order Functions** Today we're tackling recursion, and touching on higher-order functions in Python. A **recursive** function is one that calls itself. A classic example: the Fibonacci sequence. The Fibonacci sequence was originally described to model population growth, and is self-referential in its definition. The nth Fib number is defined in terms of the previous two: - F(n) = F(n-1) + F(n-2) - F(1) = 0 - F(2) = 1 Another classic example: Factorial: - n! = n(n-1)(n-2)(n-3) ... 1 or: - n! = n*(n-1)! Let's look at an implementation of the factorial and of the Fibonacci sequence in Python: ``` def factorial(n): if n == 1: return 1 else: return n*factorial(n-1) print(factorial(5)) def fibonacci(n): if n == 1: return 0 elif n == 2: return 1 else: # print('working on number ' + str(n)) return fibonacci(n-1)+fibonacci(n-2) fibonacci(7) ``` There are two very important parts of these functions: a base case (or two) and a recursive case. When designing recursive functions it can help to think about these two cases! The base case is the case when we know we are done, and can just return a value. (e.g. in fibonacci above there are two base cases, `n ==1` and `n ==2`). The recursive case is the case when we make the recursive call - that is we call the function again. Let's write a function that counts down from a parameter n to zero, and then prints "Blastoff!". ``` def countdown(n): # base case if n == 0: print('Blastoff!') # recursive case else: print(n) countdown(n-1) countdown(10) ``` Let's write a recursive function that adds up the elements of a list: ``` def add_up_list(my_list): # base case if len(my_list) == 0: return 0 # recursive case else: first_elem = my_list[0] return first_elem + add_up_list(my_list[1:]) my_list = [1, 2, 1, 3, 4] print(add_up_list(my_list)) ``` **Higher-order functions** are functions that takes a function as an argument or returns a function. We will talk briefly about functions that take a function as an argument. Let's look at an example. ``` def h(x): return x+4 def g(x): return x**2 def doItTwice(f, x): return f(f(x)) print(doItTwice(h, 3)) print(doItTwice(g, 3)) ``` A common reason for using a higher-order function is to apply a parameter-specified function repeatedly over a data structure (like a list or a dictionary). Let's look at an example function that applies a parameter function to every element of a list: ``` def sampleFunction1(x): return 2*x def sampleFunction2(x): return x % 2 def applyToAll(func, myList): newList = [] for element in myList: newList.append(func(element)) return newList aList = [2, 3, 4, 5] print(applyToAll(sampleFunction1, aList)) print(applyToAll(sampleFunction2, aList)) ``` Something like this applyToAll function is built into Python, and is called map ``` def sampleFunction1(x): return 2*x def sampleFunction2(x): return x % 2 aList = [2, 3, 4, 5] print(list(map(sampleFunction1, aList))) bList = [2, 3, 4, 5] print(list(map(sampleFunction2, aList))) ``` Python has quite a few built-in functions (some higher-order, some not). You can find lots of them here: https://docs.python.org/3.3/library/functions.html (I **will not** by default require you to remember those for an exam!!) Example: zip does something that may be familiar from last week's lab. ``` x = [1, 2, 3] y = [4, 5, 6] zipped = zip(x, y) print(list(zipped)) ```
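`filter` is another built-in higher-order function in the same spirit as `map`: it takes a function and an iterable and keeps only the elements for which the function returns True. This small example is an addition to the notes above:

```
def is_even(x):
    return x % 2 == 0

aList = [2, 3, 4, 5]
print(list(filter(is_even, aList)))          # [2, 4]
print(list(filter(lambda x: x > 3, aList)))  # [4, 5]
```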
# Introduction to `pandas` ``` import numpy as np import pandas as pd ``` ## Series and Data Frames ### Series objects A `Series` is like a vector. All elements must have the same type or are nulls. ``` s = pd.Series([1,1,2,3] + [None]) s ``` ### Size ``` s.size ``` ### Unique Counts ``` s.value_counts() ``` ### Special types of series #### Strings ``` words = 'the quick brown fox jumps over the lazy dog'.split() s1 = pd.Series([' '.join(item) for item in zip(words[:-1], words[1:])]) s1 s1.str.upper() s1.str.split() s1.str.split().str[1] ``` ### Categories ``` s2 = pd.Series(['Asian', 'Asian', 'White', 'Black', 'White', 'Hispanic']) s2 s2 = s2.astype('category') s2 s2.cat.categories s2.cat.codes ``` ### DataFrame objects A `DataFrame` is like a matrix. Columns in a `DataFrame` are `Series`. - Each column in a DataFrame represents a **variale** - Each row in a DataFrame represents an **observation** - Each cell in a DataFrame represents a **value** ``` df = pd.DataFrame(dict(num=[1,2,3] + [None])) df df.num ``` ### Index Row and column identifiers are of `Index` type. Somewhat confusingly, index is also a a synonym for the row identifiers. ``` df.index ``` #### Setting a column as the row index ``` df df1 = df.set_index('num') df1 ``` #### Making an index into a column ``` df1.reset_index() ``` ### Columns This is just a different index object ``` df.columns ``` ### Getting raw values Sometimes you just want a `numpy` array, and not a `pandas` object. ``` df.values ``` ## Creating Data Frames ### Manual ``` from collections import OrderedDict n = 5 dates = pd.date_range(start='now', periods=n, freq='d') df = pd.DataFrame(OrderedDict(pid=np.random.randint(100, 999, n), weight=np.random.normal(70, 20, n), height=np.random.normal(170, 15, n), date=dates, )) df ``` ### From file You can read in data from many different file types - plain text, JSON, spreadsheets, databases etc. Functions to read in data look like `read_X` where X is the data type. ``` %%file measures.txt pid weight height date 328 72.654347 203.560866 2018-11-11 14:16:18.148411 756 34.027679 189.847316 2018-11-12 14:16:18.148411 185 28.501914 158.646074 2018-11-13 14:16:18.148411 507 17.396343 180.795993 2018-11-14 14:16:18.148411 919 64.724301 173.564725 2018-11-15 14:16:18.148411 df = pd.read_table('measures.txt') df ``` ## Indexing Data Frames ### Implicit defaults if you provide a slice, it is assumed that you are asking for rows. ``` df[1:3] ``` If you provide a singe value or list, it is assumed that you are asking for columns. ``` df[['pid', 'weight']] ``` ### Extracting a column #### Dictionary style access ``` df['pid'] ``` #### Property style access This only works for column names tat are also valid Python identifier (i.e., no spaces or dashes or keywords) ``` df.pid ``` ### Indexing by location This is similar to `numpy` indexing ``` df.iloc[1:3, :] df.iloc[1:3, [True, False, True]] ``` ### Indexing by name ``` df.loc[1:3, 'weight':'height'] ``` **Warning**: When using `loc`, the row slice indicates row names, not positions. 
``` df1 = df.copy() df1.index = df.index + 1 df1 df1.loc[1:3, 'weight':'height'] ``` ## Structure of a Data Frame ### Data types ``` df.dtypes ``` ### Converting data types #### Using `astype` on one column ``` df.pid = df.pid.astype('category') ``` #### Using `astype` on multiple columns ``` df = df.astype(dict(weight=float, height=float)) ``` #### Using a conversion function ``` df.date = pd.to_datetime(df.date) ``` #### Check ``` df.dtypes ``` ### Basic properties ``` df.size df.shape df.describe() ``` ### Inspection ``` df.head(n=3) df.tail(n=3) df.sample(n=3) df.sample(frac=0.5) ``` ## Selecting, Renaming and Removing Columns ### Selecting columns ``` df.filter(items=['pid', 'date']) df.filter(regex='.*ght') ``` #### Note that you can also use regular string methods on the columns ``` df.loc[:, df.columns.str.contains('d')] ``` ### Renaming columns ``` df.rename(dict(weight='w', height='h'), axis=1) orig_cols = df.columns df.columns = list('abcd') df df.columns = orig_cols df ``` ### Removing columns ``` df.drop(['pid', 'date'], axis=1) df.drop(columns=['pid', 'date']) df.drop(columns=df.columns[df.columns.str.contains('d')]) ``` ## Selecting, Renaming and Removing Rows ### Selecting rows ``` df[df.weight.between(60,70)] df[(69 <= df.weight) & (df.weight < 70)] df[df.date.between(pd.to_datetime('2018-11-13'), pd.to_datetime('2018-11-15 23:59:59'))] ``` ### Renaming rows ``` df.rename({i:letter for i,letter in enumerate('abcde')}) df.index = ['the', 'quick', 'brown', 'fox', 'jumphs'] df df = df.reset_index(drop=True) df ``` ### Dropping rows ``` df.drop([1,3], axis=0) ``` #### Dropping duplicated data ``` df['something'] = [1,1,None,2,None] df.loc[df.something.duplicated()] df.drop_duplicates(subset='something') ``` #### Dropping missing data ``` df df.something.fillna(0) df.something.ffill() df.something.bfill() df.something.interpolate() df.dropna() ``` ## Transforming and Creating Columns ``` df.assign(bmi=df['weight'] / (df['height']/100)**2) df['bmi'] = df['weight'] / (df['height']/100)**2 df df['something'] = [2,2,None,None,3] df ``` ## Sorting Data Frames ### Sort on indexes ``` df.sort_index(axis=1) df.sort_index(axis=0, ascending=False) ``` ### Sort on values ``` df.sort_values(by=['something', 'bmi'], ascending=[True, False]) ``` ## Summarizing ### Apply an aggregation function ``` df.select_dtypes(include=np.number) df.select_dtypes(include=np.number).agg(np.sum) df.agg(['count', np.sum, np.mean]) ``` ## Split-Apply-Combine We often want to perform subgroup analysis (conditioning by some discrete or categorical variable). This is done with `groupby` followed by an aggregate function. Conceptually, we split the data frame into separate groups, apply the aggregate function to each group separately, then combine the aggregated results back into a single data frame. ``` df['treatment'] = list('ababa') df grouped = df.groupby('treatment') grouped.get_group('a') grouped.mean() ``` ### Using `agg` with `groupby` ``` grouped.agg('mean') grouped.agg(['mean', 'std']) grouped.agg({'weight': ['mean', 'std'], 'height': ['min', 'max'], 'bmi': lambda x: (x**2).sum()}) ``` ### Using `trasnform` wtih `groupby` ``` g_mean = grouped['weight', 'height'].transform(np.mean) g_mean g_std = grouped['weight', 'height'].transform(np.std) g_std (df[['weight', 'height']] - g_mean)/g_std ``` ## Combining Data Frames ``` df df1 = df.iloc[3:].copy() df1.drop('something', axis=1, inplace=True) df1 ``` ### Adding rows Note that `pandas` aligns by column indexes automatically. 
``` df.append(df1, sort=False) pd.concat([df, df1], sort=False) ``` ### Adding columns ``` df.pid df2 = pd.DataFrame(OrderedDict(pid=[649, 533, 400, 600], age=[23,34,45,56])) df2.pid df.pid = df.pid.astype('int') pd.merge(df, df2, on='pid', how='inner') pd.merge(df, df2, on='pid', how='left') pd.merge(df, df2, on='pid', how='right') pd.merge(df, df2, on='pid', how='outer') ``` ### Merging on the index ``` df1 = pd.DataFrame(dict(x=[1,2,3]), index=list('abc')) df2 = pd.DataFrame(dict(y=[4,5,6]), index=list('abc')) df3 = pd.DataFrame(dict(z=[7,8,9]), index=list('abc')) df1 df2 df3 df1.join([df2, df3]) ``` ## Fixing common DataFrame issues ### Multiple variables in a column ``` df = pd.DataFrame(dict(pid_treat = ['A-1', 'B-2', 'C-1', 'D-2'])) df df.pid_treat.str.split('-') df.pid_treat.str.split('-').apply(pd.Series, index=['pid', 'treat']) ``` ### Multiple values in a cell ``` df = pd.DataFrame(dict(pid=['a', 'b', 'c'], vals = [(1,2,3), (4,5,6), (7,8,9)])) df df[['t1', 't2', 't3']] = df.vals.apply(pd.Series) df df.drop('vals', axis=1, inplace=True) pd.melt(df, id_vars='pid', value_name='vals').drop('variable', axis=1) ``` ## Reshaping Data Frames Sometimes we need to make rows into columns or vice versa. ### Converting multiple columns into a single column This is often useful if you need to condition on some variable. ``` url = 'https://raw.githubusercontent.com/uiuc-cse/data-fa14/gh-pages/data/iris.csv' iris = pd.read_csv(url) iris.head() iris.shape df_iris = pd.melt(iris, id_vars='species') df_iris.sample(10) ``` ## Chaining commands Sometimes you see this functional style of method chaining that avoids the need for temporary intermediate variables. ``` ( iris. sample(frac=0.2). filter(regex='s.*'). assign(both=iris.sepal_length + iris.sepal_length). groupby('species').agg(['mean', 'sum']). pipe(lambda x: np.around(x, 1)) ) ``` ## Moving between R and Python in Jupyter ``` %load_ext rpy2.ipython import warnings warnings.simplefilter('ignore', FutureWarning) iris = %R iris iris.head() iris_py = iris.copy() iris_py.Species = iris_py.Species.str.upper() %%R -i iris_py -o iris_r iris_r <- iris_py[1:3,] iris_r ```
``` import os, gc, sys import pygrib import regionmask import cartopy import cartopy.crs as ccrs import numpy as np import pandas as pd import xarray as xr import geopandas as gpd import multiprocessing as mp import matplotlib.pyplot as plt from glob import glob from functools import partial from matplotlib import gridspec from datetime import datetime, timedelta from mpl_toolkits.axes_grid1 import make_axes_locatable from matplotlib import colors os.environ['OMP_NUM_THREADS'] = '1' # CONFIG # # CONFIG # # CONFIG # # CONFIG # # CONFIG # cwa = 'SEW'#sys.argv[1] fhr_start, fhr_end, fhr_step = 24, 108, 6 start_date = datetime(2020, 10, 1, 0) end_date = datetime(2020, 12, 3, 12) produce_thresholds = [0.01, 0.25, 0.50] bint, bins_custom = 5, None cwa_bounds = { 'WESTUS':[30, 50, -130, -100], 'SEW':[46.0, 49.0, -125.0, -120.5], 'SLC':[37.0, 42.0, -114.0, -110], 'MSO':[44.25, 49.0, -116.75, -112.25], 'MTR':[35.75, 38.75, -123.5, -120.25],} # CONFIG # # CONFIG # # CONFIG # # CONFIG # # CONFIG # nbm_dir = '/scratch/general/lustre/u1070830/nbm/' urma_dir = '/scratch/general/lustre/u1070830/urma/' tmp_dir = '/scratch/general/lustre/u1070830/tmp/' fig_dir = '/uufs/chpc.utah.edu/common/home/steenburgh-group10/mewessler/nbm/' os.makedirs(tmp_dir, exist_ok=True) def resize_colobar(event): # Tell matplotlib to re-draw everything, so that we can get # the correct location from get_position. plt.draw() posn = ax.get_position() colorbar_ax.set_position([posn.x0 + posn.width + 0.01, posn.y0, 0.04, axpos.height]) def calc_pbin(pbin, _bint, _thresh, _data, _urma): p0, p1 = pbin-_bint/2, pbin+_bint/2 N = xr.where((_data >= p0) & (_data < p1), 1, 0).sum(dim=['valid']) n = xr.where((_data >= p0) & (_data < p1) & (_urma > _thresh), 1, 0).sum(dim='valid') return pbin, n, N def calc_pbin_fixed(pbin, _thresh, _data, _urma): p0, p1 = pbin N = xr.where((_data >= p0) & (_data <= p1), 1, 0).sum(dim=['valid']) n = xr.where((_data >= p0) & (_data <= p1) & (_urma > _thresh), 1, 0).sum(dim='valid') return pbin, n, N extract_dir = nbm_dir + 'extract/' extract_flist = sorted(glob(extract_dir + '*')) if not os.path.isfile(urma_dir + 'agg/urma_agg.nc'): pass #print('URMA aggregate not found') else: #print('Getting URMA aggregate from file') urma_whole = xr.open_dataset(urma_dir + 'agg/urma_agg.nc')['apcp24h_mm'] urma_whole = urma_whole/25.4 urma_whole = urma_whole.rename('apcp24h_in') geodir = '../forecast-zones/' zones_shapefile = glob(geodir + '*.shp')[0] # Read the shapefile zones = gpd.read_file(zones_shapefile) # Prune to Western Region using TZ zones = zones.set_index('TIME_ZONE').loc[['M', 'Mm', 'm', 'MP', 'P']].reset_index() cwas = zones.dissolve(by='CWA') pbin_stats_all = {} for thresh in produce_thresholds: for fhr in np.arange(fhr_start, fhr_end+1, fhr_step): open_file = [f for f in extract_flist if 'fhr%03d'%fhr in f][0] print(open_file) # Subset the times nbm = xr.open_dataset(open_file) nbm_time = nbm.valid urma_time = urma_whole.valid time_match = nbm_time[np.in1d(nbm_time, urma_time)].values time_match = np.array([t for t in time_match if pd.to_datetime(t) >= start_date]) time_match = np.array([t for t in time_match if pd.to_datetime(t) <= end_date]) nbm = nbm.sel(valid=time_match) urma = urma_whole.sel(valid=time_match) date0 = pd.to_datetime(time_match[0]).strftime('%Y/%m/%d %H UTC') date1 = pd.to_datetime(time_match[-1]).strftime('%Y/%m/%d %H UTC') nlat, xlat, nlon, xlon = cwa_bounds[cwa] lats, lons = nbm.lat, nbm.lon idx = np.where( (lats >= nlat) & (lats <= xlat) & (lons >= nlon) & (lons <= xlon)) nbm = 
nbm.isel(x=slice(idx[1].min(), idx[1].max()), y=slice(idx[0].min(), idx[0].max())) urma = urma.isel(x=slice(idx[1].min(), idx[1].max()), y=slice(idx[0].min(), idx[0].max())) # Subset the threshold value nbm = nbm.sel(threshold=thresh)['probx'] total_fc = xr.where(nbm > 0, 1, 0).sum() total_ob = xr.where(urma > thresh, 1, 0).sum() bins = np.arange(0, 101, bint) bins = bins_custom if bins_custom is not None else bins # calc_pbin_mp = partial(calc_pbin, _bint=bint, _thresh=thresh, # _data=nbm, _urma=urma) calc_pbin_mp = partial(calc_pbin_fixed, _thresh=thresh, _data=nbm, _urma=urma) pbin_stats = calc_pbin_mp([60, 80]) # with mp.get_context('fork').Pool(len(bins)) as p: # pbin_stats = p.map(calc_pbin_mp, bins, chunksize=1) # p.close() # p.join() # pbin_stats_all[fhr] = np.array(pbin_stats, dtype=np.int) break break pbins, n, N = pbin_stats levels = np.hstack([0, np.array(pbins), 100])/100 print(levels) p_levs = levels#np.array(pbins)/100 p_levs_locs = p_levs p_colors = ['#5ab4ac','#5ab4ac','#f5f5f5', '#d8b365'] p_cmap = colors.ListedColormap(p_colors, name='p_cmap') fig = plt.figure(figsize=(12, 12), facecolor='w') ax = fig.add_axes([0, 0, 1, 1], projection=ccrs.PlateCarree()) ax.add_feature(cartopy.feature.OCEAN, zorder=100, color='w', edgecolor=None) if cwa == 'WESTUS': cwas.geometry.boundary.plot(color=None, edgecolor='black', linewidth=0.75, ax=ax) ax.coastlines(linewidth=3.5) else: cwas.geometry.boundary.plot(color=None, edgecolor='black', linewidth=2.5, ax=ax) zones.geometry.boundary.plot(color=None, linestyle='--', edgecolor='black', linewidth=0.75, ax=ax) ax.coastlines(linewidth=8) data = xr.where(n > 5, n/N, np.nan) cbd = ax.contourf(data.lon, data.lat, data, levels=levels, alpha=0.5, cmap=p_cmap, vmin=0, vmax=1) nan_shade = xr.where(np.isnan(data), -1, np.nan) ax.contourf(data.lon, data.lat, nan_shade, cmap='gray', alpha=0.25) cbar_ax = fig.add_axes([1.01, .04, .05, .92]) # cbar_ax = fig.add_axes([.85, .04, .02, .92]) cbar = plt.colorbar(cbd, cax=cbar_ax) cbar.ax.tick_params(labelsize=16) fig.canvas.mpl_connect('resize_event', resize_colobar) ax.set_ylim(bottom=cwa_bounds[cwa][0]-0.25, top=cwa_bounds[cwa][1]+0.25) ax.set_xlim(left=cwa_bounds[cwa][2]-0.25, right=cwa_bounds[cwa][3]+0.25) ax.set_title('CWA: %s\nThreshold: %.02f"\nBin: %d%% - %d%%'%(cwa, thresh, pbins[0], pbins[1]), fontsize=16) cbar.set_label(label='\n[< Too Wet] Observed Relative Frequency [Too Dry >]', fontsize=16) ax.grid(True, zorder=-10) print(pbins) plt.show() ```
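The heart of the verification above is the per-bin counting in `calc_pbin_fixed`: at every grid point, `N` counts the valid times whose forecast probability falls inside the bin, and `n` counts how many of those times the URMA analysis actually exceeded the threshold, so `n/N` is the observed relative frequency mapped above. The sketch below reproduces that computation on small synthetic arrays; the array names, sizes, and random data are illustrative stand-ins for the NBM/URMA files, not part of the notebook's data.

```
import numpy as np
import xarray as xr

rng = np.random.default_rng(0)
nt, ny, nx = 200, 4, 5                      # valid times, grid rows, grid cols (illustrative)
prob = xr.DataArray(rng.uniform(0, 100, (nt, ny, nx)),
                    dims=('valid', 'y', 'x'), name='probx')      # forecast prob. of exceedance (%)
# Synthetic "analysis": the event occurs with exactly the forecast probability,
# i.e. a perfectly reliable forecast, so the observed frequency should sit near the bin centre.
event = rng.uniform(0, 100, (nt, ny, nx)) < prob.values
obs = xr.DataArray(np.where(event, 0.5, 0.0),
                   dims=('valid', 'y', 'x'), name='apcp24h_in')  # 24 h precipitation (inches)

thresh = 0.25       # exceedance threshold (inches)
p0, p1 = 60, 80     # probability bin edges (%)

# N: how often each grid point forecast a probability inside the bin
N = xr.where((prob >= p0) & (prob <= p1), 1, 0).sum(dim='valid')
# n: of those times, how often the analysis actually exceeded the threshold
n = xr.where((prob >= p0) & (prob <= p1) & (obs > thresh), 1, 0).sum(dim='valid')

# Observed relative frequency, masked where the event count is small (as in the map above)
orf = xr.where(n > 5, n / N, np.nan)
print(orf.values)   # values near 0.7, the centre of the 60-80 % bin
```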
``` import numpy as np import matplotlib.pyplot as plt import matplotlib import os, sys import argparse import torch from Code.Utils import from_pickle from Code.models import cartpole from Code.integrate_models import implicit_integration_DEL, integrate_ODE from Code.symo import SyMo_RT from Code.NN import LODE_RT, NODE_RT from Code.models import get_field, cartpole THIS_DIR = os.getcwd() DPI = 100 FORMAT = 'png' LINE_SEGMENTS = 10 ARROW_SCALE = 60 ARROW_WIDTH = 6e-3 LINE_WIDTH = 2 save_dir = "Experiments_cartpole/noise" def get_args(): return {'fig_dir': './figures/cartpole', 'gpu': 2, 'pred_tol': 1e-5 , 'pred_maxiter': 10} class ObjectView(object): def __init__(self, d): self.__dict__ = d args = ObjectView(get_args()) device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu') def load_stats(nn, models): #loads the stats of all models train_loss = [] test_loss= [] int_loss = [] int_std = [] E_loss = [] E_std = [] H_loss = [] H_std = [] for model in models: path = '{}/{}/{}{}-p-32x32_sigma_{}-stats.pkl'.format(THIS_DIR, save_dir, "cartpole-", nn, model) stats = from_pickle(path) if 'SyMo' in nn: train_loss.append(stats['train_loss_poses']) test_loss.append(stats['test_loss_poses']) else: train_loss.append(stats['train_loss_poses']) test_loss.append(stats['test_loss_poses']) int_loss.append(stats['int_loss_poses']) int_std.append(stats['int_std']) E_loss.append(stats['E_loss']) E_std.append(stats['E_std']) if nn != 'NODE-rk4' and nn != "NODE-midpoint": H_loss.append(stats['H_loss']) H_std.append(stats['H_std']) if nn != 'NODE-rk4' and nn != "NODE-midpoint": return train_loss, test_loss, int_loss, int_std, E_loss, E_std, H_loss, H_std else: return train_loss, test_loss, int_loss, int_std, E_loss, E_std def load_stats_noiseless(nn, models): #loads the stats of all models train_loss = [] test_loss= [] int_loss = [] int_std = [] E_loss = [] E_std = [] H_loss = [] H_std = [] for model in models: path = '{}/{}/{}{}-p-{}-stats.pkl'.format(THIS_DIR, "Experiments_cartpole/h=0.05", "cartpole-", nn, model) stats = from_pickle(path) if 'SyMo' in nn: train_loss.append(stats['train_loss_poses']) test_loss.append(stats['test_loss_poses']) else: train_loss.append(stats['train_loss_poses']) test_loss.append(stats['test_loss_poses']) int_loss.append(stats['int_loss_poses']) int_std.append(stats['int_std']) E_loss.append(stats['E_loss']) E_std.append(stats['E_std']) if nn != 'NODE-rk4' and nn != "NODE-midpoint": H_loss.append(stats['H_loss']) H_std.append(stats['H_std']) if nn != 'NODE-rk4' and nn != "NODE-midpoint": return train_loss, test_loss, int_loss, int_std, E_loss, E_std, H_loss, H_std else: return train_loss, test_loss, int_loss, int_std, E_loss, E_std models=["32x32"] #Load E2E-SyMo models train_loss_N_SYMO_noiseless, test_loss_N_SYMO_noiseless, int_loss_N_SYMO_noiseless, int_std_N_SYMO, E_loss_N_SYMO, E_std_N_SYMO, H_loss_N_SYMO, H_std_N_SYMO = load_stats_noiseless('N-SyMo', models) # Load SyMo models train_loss_SYMO_noiseless, test_loss_SYMO_noiseless, int_loss_SYMO_noiseless, int_std_SYMO, E_loss_SYMO, E_std_SYMO, H_loss_SYMO, H_std_SYMO = load_stats_noiseless('SyMo', models) #Load LODE_RK4 models train_loss_LODE_RK4_noiseless, test_loss_LODE_RK4_noiseless, int_loss_LODE_RK4_noiseless, int_std_LODE_RK4, E_loss_LODE_RK4, E_std_LODE_RK4, H_loss_LODE_RK4, H_std_LODE_RK4 = load_stats_noiseless('L-NODE-rk4', models) #Load LODE_RK2 models train_loss_LODE_RK2_noiseless, test_loss_LODE_RK2_noiseless, int_loss_LODE_RK2_noiseless, int_std_LODE_RK2, E_loss_LODE_RK2, 
E_std_LODE_RK2, H_loss_LODE_RK2, H_std_LODE_RK2 = load_stats_noiseless('L-NODE-midpoint', models) #Load NODE_RK4 models train_loss_NODE_RK4_noiseless, test_loss_NODE_RK4_noiseless, int_loss_NODE_RK4_noiseless, int_std_NODE_RK4, E_loss_NODE_RK4, E_std_NODE_RK4 = load_stats_noiseless('NODE-rk4', models) #Load NODE_RK2 models train_loss_NODE_RK2_noiseless, test_loss_NODE_RK2_noiseless, int_loss_NODE_RK2_noiseless, int_std_NODE_RK2, E_loss_NODE_RK2, E_std_NODE_RK2 = load_stats_noiseless('NODE-midpoint', models) models = [0.0001, 0.0005, 0.001, 0.005, 0.01] #Load E2E-SyMo models train_loss_N_SYMO, test_loss_N_SYMO, int_loss_N_SYMO, int_std_N_SYMO, E_loss_N_SYMO, E_std_N_SYMO, H_loss_N_SYMO, H_std_N_SYMO = load_stats('N-SyMo', models) # Load SyMo models train_loss_SYMO, test_loss_SYMO, int_loss_SYMO, int_std_SYMO, E_loss_SYMO, E_std_SYMO, H_loss_SYMO, H_std_SYMO = load_stats('SyMo', models) #Load LODE_RK4 models train_loss_LODE_RK4, test_loss_LODE_RK4, int_loss_LODE_RK4, int_std_LODE_RK4, E_loss_LODE_RK4, E_std_LODE_RK4, H_loss_LODE_RK4, H_std_LODE_RK4 = load_stats('L-NODE-rk4', models) #Load LODE_RK2 models train_loss_LODE_RK2, test_loss_LODE_RK2, int_loss_LODE_RK2, int_std_LODE_RK2, E_loss_LODE_RK2, E_std_LODE_RK2, H_loss_LODE_RK2, H_std_LODE_RK2 = load_stats('L-NODE-midpoint', models) #Load NODE_RK4 models train_loss_NODE_RK4, test_loss_NODE_RK4, int_loss_NODE_RK4, int_std_NODE_RK4, E_loss_NODE_RK4, E_std_NODE_RK4 = load_stats('NODE-rk4', models) #Load NODE_RK2 models train_loss_NODE_RK2, test_loss_NODE_RK2, int_loss_NODE_RK2, int_std_NODE_RK2, E_loss_NODE_RK2, E_std_NODE_RK2 = load_stats('NODE-midpoint', models) train_loss_N_SYMO = [*train_loss_N_SYMO_noiseless, *train_loss_N_SYMO] train_loss_SYMO = [*train_loss_SYMO_noiseless, *train_loss_SYMO] train_loss_LODE_RK2 = [*train_loss_LODE_RK2_noiseless, *train_loss_LODE_RK2] train_loss_LODE_RK4 = [*train_loss_LODE_RK4_noiseless, *train_loss_LODE_RK4] train_loss_NODE_RK2 = [*train_loss_NODE_RK2_noiseless, *train_loss_NODE_RK2] train_loss_NODE_RK4 = [*train_loss_NODE_RK4_noiseless, *train_loss_NODE_RK4] test_loss_N_SYMO = [*test_loss_N_SYMO_noiseless, *test_loss_N_SYMO] test_loss_SYMO = [*test_loss_SYMO_noiseless, *test_loss_SYMO] test_loss_LODE_RK2 = [*test_loss_LODE_RK2_noiseless, *test_loss_LODE_RK2] test_loss_LODE_RK4 = [*test_loss_LODE_RK4_noiseless, *test_loss_LODE_RK4] test_loss_NODE_RK2 = [*test_loss_NODE_RK2_noiseless, *test_loss_NODE_RK2] test_loss_NODE_RK4 = [*test_loss_NODE_RK4_noiseless, *test_loss_NODE_RK4] int_loss_N_SYMO = [*int_loss_N_SYMO_noiseless, *int_loss_N_SYMO] int_loss_SYMO = [*int_loss_SYMO_noiseless, *int_loss_SYMO] int_loss_LODE_RK2 = [*int_loss_LODE_RK2_noiseless, *int_loss_LODE_RK2] int_loss_LODE_RK4 = [*int_loss_LODE_RK4_noiseless, *int_loss_LODE_RK4] int_loss_NODE_RK2 = [*int_loss_NODE_RK2_noiseless, *int_loss_NODE_RK2] int_loss_NODE_RK4 = [*int_loss_NODE_RK4_noiseless, *int_loss_NODE_RK4] x_axis = np.array([0, 0.0001, 0.0005, 0.001, 0.005, 0.01]) fig = plt.figure(figsize=(18, 5), dpi=DPI) ax1=plt.subplot(1, 3, 1) plt.plot(x_axis.astype('str'), train_loss_NODE_RK4, 'bs-', label='NODE-rk4') plt.plot(x_axis.astype('str'), train_loss_NODE_RK2, 'ms-', label='NODE-midpoint') plt.plot(x_axis.astype('str'), train_loss_LODE_RK4, 'gs-', label= 'L-NODE-rk4') plt.plot(x_axis.astype('str'), train_loss_LODE_RK2, 'ks-', label='L-NODE-midpoint') plt.plot(x_axis.astype('str'), train_loss_SYMO, 'rs-', label='SyMo-midpoint') plt.plot(x_axis.astype('str'), train_loss_N_SYMO, 'cs-', label = 'E2E-SyMo-midpoint') # plt.xscale('log') 
plt.yscale('log') plt.legend(fontsize=8) plt.ylabel('Train error') plt.xlabel('$\sigma$') plt.subplot(1, 3, 2) plt.plot(x_axis.astype('str'), test_loss_NODE_RK4, 'bs-', label='NODE-rk4') plt.plot(x_axis.astype('str'), test_loss_NODE_RK2, 'ms-', label='NODE-midpoint') plt.plot(x_axis.astype('str'), test_loss_LODE_RK4, 'gs-', label= 'L-NODE-rk4') plt.plot(x_axis.astype('str'), test_loss_LODE_RK2, 'ks-', label='L-NODE-midpoint') plt.plot(x_axis.astype('str'), test_loss_SYMO, 'rs-', label='SyMo-midpoint') plt.plot(x_axis.astype('str'), test_loss_N_SYMO, 'cs-', label = 'E2E-SyMo-midpoint') # plt.xscale('log') plt.yscale('log') plt.legend(fontsize=8) plt.xlabel('$\sigma$') plt.ylabel('Test error') plt.subplot(1, 3, 3) plt.plot(x_axis.astype('str'), int_loss_NODE_RK4, 'bs-', label='NODE-rk4') plt.plot(x_axis.astype('str'), int_loss_NODE_RK2, 'ms-', label='NODE-midpoint') plt.plot(x_axis.astype('str'), int_loss_LODE_RK4, 'gs-', label= 'L-NODE-rk4') plt.plot(x_axis.astype('str'), int_loss_LODE_RK2, 'ks-', label='L-NODE-midpoint') plt.plot(x_axis.astype('str'), int_loss_SYMO, 'rs-', label='SyMo-midpoint') plt.plot(x_axis.astype('str'), int_loss_N_SYMO, 'cs-', label = 'E2E-SyMo-midpoint') # plt.xscale('log') plt.yscale('log') plt.legend(fontsize=8) plt.xlabel('$\sigma$') plt.ylabel('Integration error') fig.savefig('{}/fig-train-pred-loss_noise_cartpole.{}'.format(args.fig_dir, FORMAT)) ```
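The six architectures above are loaded and concatenated one by one, which makes the cell long and easy to get out of sync. Below is a hedged sketch of how the same bookkeeping could be collapsed into a loop for the integration-error panel; it is not part of the original notebook and assumes the cells above have already run, so `load_stats`, `load_stats_noiseless`, `np`, `plt`, and `DPI` are in scope (index 2 of both return tuples is the integration loss).

```
archs = ['NODE-rk4', 'NODE-midpoint', 'L-NODE-rk4', 'L-NODE-midpoint', 'SyMo', 'N-SyMo']
styles = dict(zip(archs, ['bs-', 'ms-', 'gs-', 'ks-', 'rs-', 'cs-']))
sigmas = [0, 0.0001, 0.0005, 0.001, 0.005, 0.01]

int_curves = {}
for arch in archs:
    noiseless = load_stats_noiseless(arch, ['32x32'])
    noisy = load_stats(arch, [0.0001, 0.0005, 0.001, 0.005, 0.01])
    # index 2 of the returned tuple is the integration loss for every architecture
    int_curves[arch] = [*noiseless[2], *noisy[2]]

plt.figure(figsize=(6, 5), dpi=DPI)
for arch, ys in int_curves.items():
    plt.plot(np.array(sigmas).astype('str'), ys, styles[arch], label=arch)
plt.yscale('log')
plt.xlabel(r'$\sigma$')
plt.ylabel('Integration error')
plt.legend(fontsize=8)
plt.show()
```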
# SLU13: Bias-Variance trade-off & Model Selection -- Examples --- <a id='top'></a> ### 1. Model evaluation * a. [Train-test split](#traintest) * b. [Train-val-test split](#val) * c. [Cross validation](#crossval) ### 2. [Learning curves](#learningcurves) # 1. Model evaluation ``` import matplotlib.pyplot as plt import pandas as pd import numpy as np from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.model_selection import learning_curve %matplotlib inline # Create the DataFrame with the data df = pd.read_csv("data/beer.csv") # Create a DataFrame with the features (X) and labels (y) X = df.drop(["IsIPA"], axis=1) y = df["IsIPA"] print("Number of entries: ", X.shape[0]) ``` <a id='traintest'></a> [Return to top](#top) ## Create a training and a test set ``` from sklearn.model_selection import train_test_split # Using 20 % of the data as test set X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) print("Number of training entries: ", X_train.shape[0]) print("Number of test entries: ", X_test.shape[0]) ``` <a id='val'></a> [Return to top](#top) ## Create a training, test and validation set ``` # Using 20 % as test set and 20 % as validation set X_train, X_temp, y_train, y_temp = train_test_split(X, y, test_size=0.4) X_val, X_test, y_val, y_test = train_test_split(X_temp, y_temp, test_size=0.50) print("Number of training entries: ", X_train.shape[0]) print("Number of validation entries: ", X_val.shape[0]) print("Number of test entries: ", X_test.shape[0]) ``` <a id='crossval'></a> [Return to top](#top) ## Use cross-validation (using a given classifier) ``` from sklearn.model_selection import cross_val_score knn = KNeighborsClassifier(n_neighbors=5) # Use cv to specify the number of folds scores = cross_val_score(knn, X, y, cv=5) print(f"Mean of scores: {scores.mean():.3f}") print(f"Variance of scores: {scores.var():.3f}") ``` <a id='learningcurves'></a> [Return to top](#top) # 2. Learning Curves Here is the function that is taken from the sklearn page on learning curves: ``` def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None, n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)): """ Generate a simple plot of the test and training learning curve. Parameters ---------- estimator : object type that implements the "fit" and "predict" methods An object of that type which is cloned for each validation. title : string Title for the chart. X : array-like, shape (n_samples, n_features) Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples) or (n_samples, n_features), optional Target relative to X for classification or regression; None for unsupervised learning. ylim : tuple, shape (ymin, ymax), optional Defines minimum and maximum yvalues plotted. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, if ``y`` is binary or multiclass, :class:`StratifiedKFold` used. If the estimator is not a classifier or if ``y`` is neither binary nor multiclass, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validators that can be used here. 
n_jobs : integer, optional Number of jobs to run in parallel (default 1). """ plt.figure() plt.title(title) if ylim is not None: plt.ylim(*ylim) plt.xlabel("Training examples") plt.ylabel("Score") train_sizes, train_scores, test_scores = learning_curve( estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes) train_scores_mean = np.mean(train_scores, axis=1) train_scores_std = np.std(train_scores, axis=1) test_scores_mean = np.mean(test_scores, axis=1) test_scores_std = np.std(test_scores, axis=1) plt.grid() plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color="r") plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1, color="g") plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training score") plt.plot(train_sizes, test_scores_mean, 'o-', color="g", label="Test Set score") plt.legend(loc="best") return plt # and this is how we used it X = df.select_dtypes(exclude='object').fillna(-1).drop('IsIPA', axis=1) y = df.IsIPA clf = DecisionTreeClassifier(random_state=1, max_depth=5) plot_learning_curve(X=X, y=y, estimator=clf, title='DecisionTreeClassifier'); ``` And remember the internals of what this function is actually doing by knowing how to use the output of the scikit [learning_curve](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.learning_curve.html) function ``` # here's where the magic happens! The learning curve function is going # to take your classifier and your training data and subset the data train_sizes, train_scores, test_scores = learning_curve(clf, X, y) # 5 different training set sizes have been selected # with the smallest being 59 and the largest being 594 # the remaining is used for testing print('train set sizes', train_sizes) print('test set sizes', X.shape[0] - train_sizes) # each row corresponds to a training set size # each column corresponds to a cross validation fold # the first row is the highest because it corresponds # to the smallest training set which means that it's very # easy for the classifier to overfit and have perfect # test set predictions while as the test set grows it # becomes a bit more difficult for this to happen. train_scores # The test set scores where again, each row corresponds # to a train / test set size and each column is a differet # run with the same train / test sizes test_scores # Let's average the scores across each fold so that we can plot them train_scores_mean = np.mean(train_scores, axis=1) test_scores_mean = np.mean(test_scores, axis=1) # this one isn't quite as cool as the other because it doesn't show the variance # but the fundamentals are still here and it's a much simpler one to understand learning_curve_df = pd.DataFrame({ 'Training score': train_scores_mean, 'Test Set score': test_scores_mean }, index=train_sizes) plt.figure() plt.ylabel("Score") plt.xlabel("Training examples") plt.title('Learning Curve') plt.plot(learning_curve_df); plt.legend(learning_curve_df.columns, loc="best"); ```
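The gap between the two averaged curves is the practical read-out of the bias-variance trade-off: a large persistent gap at the biggest training size points to overfitting (high variance), while two curves that converge at a low score point to underfitting (high bias). Below is a small hedged sketch of that check, reusing the `train_scores_mean` and `test_scores_mean` arrays computed above; the 0.05 and 0.7 cut-offs are illustrative choices, not rules from this unit.

```
# Quick bias/variance diagnostic from the averaged learning-curve scores.
# The 0.05 / 0.7 thresholds are illustrative, not fixed rules.
gap = train_scores_mean[-1] - test_scores_mean[-1]   # gap at the largest training size
final_cv_score = test_scores_mean[-1]

if gap > 0.05:
    print(f"Train/validation gap of {gap:.3f}: the model may be overfitting (high variance).")
elif final_cv_score < 0.7:
    print(f"Both curves plateau near {final_cv_score:.3f}: the model may be underfitting (high bias).")
else:
    print(f"Gap {gap:.3f}, CV score {final_cv_score:.3f}: the model looks reasonably balanced.")
```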
``` import matplotlib.pyplot as plt import numpy as np import scipy.io as scio import linearRegCostFunction as lrcf import trainLinearReg as tlr import learningCurve as lc import polyFeatures as pf import featureNormalize as fn import plotFit as plotft import validationCurve as vc plt.ion() np.set_printoptions(formatter={'float': '{: 0.6f}'.format}) # ===================== Part 1: Loading and Visualizing Data ===================== # We start the exercise by first loading and visualizing the dataset. # The following code will load the dataset into your environment and pot # the data. # # Load Training data print('Loading and Visualizing data ...') # Load from ex5data1: data = scio.loadmat('ex5data1.mat') X = data['X'] y = data['y'].flatten() Xval = data['Xval'] yval = data['yval'].flatten() Xtest = data['Xtest'] ytest = data['ytest'].flatten() m = y.size # Plot training data plt.figure() plt.scatter(X, y, c='r', marker="x") plt.xlabel('Change in water level (x)') plt.ylabel('Water folowing out of the dam (y)') def linear_reg_cost_function(theta, x, y, lmd): # Initialize some useful values m = y.size # You need to return the following variables correctly cost = 0 grad = np.zeros(theta.shape) # ===================== Your Code Here ===================== # Instructions : Compute the cost and gradient of regularized linear # regression for a particular choice of theta # # You should set 'cost' to the cost and 'grad' # to the gradient # h = (x @ theta) cost = 1 * (1/(2*m)) * np.sum(np.square(h - y)) + lmd * (1/(2*m)) * np.sum(np.square(theta[1:])) grad = (1 * (1/m) * (h-y)@x + (lmd * (1/m) * np.r_[0, theta[1:]])) # ========================================================== return cost, grad # ===================== Part 2: Regularized Linear Regression Cost ===================== # You should now implement the cost function for regularized linear regression # theta = np.ones(2) # cost, _ = lrcf.linear_reg_cost_function(theta, np.c_[np.ones(m), X], y, 1) cost, _ = linear_reg_cost_function(theta, np.c_[np.ones(m), X], y, 1) print('Cost at theta = [1 1]: {:0.6f}\n(this value should be about 303.993192'.format(cost)) # ===================== Part 3: Regularized Linear Regression Gradient ===================== # You should now implement the gradient for regularized linear regression # theta = np.ones(2) #cost, grad = lrcf.linear_reg_cost_function(theta, np.c_[np.ones(m), X], y, 1) cost, grad = linear_reg_cost_function(theta, np.c_[np.ones(m), X], y, 1) print('Gradient at theta = [1 1]: {}\n(this value should be about [-15.303016 598.250744]'.format(grad)) import scipy.optimize as opt def train_linear_reg(x, y, lmd): initial_theta = np.ones(x.shape[1]) def cost_func(t): return linear_reg_cost_function(t, x, y, lmd)[0] def grad_func(t): return linear_reg_cost_function(t, x, y, lmd)[1] theta, *unused = opt.fmin_cg(cost_func, initial_theta, grad_func, maxiter=200, disp=False, full_output=True) return theta # ===================== Part 4: Train Linear Regression ===================== # Once you have implemented the cost and gradient correctly, the # train_linear_reg function will use your cost function to train regularzized linear regression. # # Write Up Note : The data is non-linear, so this will not give a great fit. 
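# ----- Hedged aside (not part of the original ex5 assignment) -----------------
# A quick numerical check of the analytic gradient from Part 3, using central
# finite differences around the same theta = [1, 1] and lambda = 1 used above.
# The helper name and the epsilon value are illustrative choices.
def numerical_gradient(theta_chk, x_chk, y_chk, lmd_chk, eps_fd=1e-4):
    grad_approx = np.zeros_like(theta_chk)
    for j in range(theta_chk.size):
        step = np.zeros_like(theta_chk)
        step[j] = eps_fd
        cost_plus, _ = linear_reg_cost_function(theta_chk + step, x_chk, y_chk, lmd_chk)
        cost_minus, _ = linear_reg_cost_function(theta_chk - step, x_chk, y_chk, lmd_chk)
        grad_approx[j] = (cost_plus - cost_minus) / (2 * eps_fd)
    return grad_approx

grad_numeric = numerical_gradient(np.ones(2), np.c_[np.ones(m), X], y, 1)
print('Finite-difference gradient: {} (should be close to the analytic gradient above)'.format(grad_numeric))
# -------------------------------------------------------------------------------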
# # Train linear regression with lambda = 0 lmd = 0 # theta = tlr.train_linear_reg(np.c_[np.ones(m), X], y, lmd) theta = train_linear_reg(np.c_[np.ones(m), X], y, lmd) # Plot training data plt.figure() plt.scatter(X, y, c='r', marker="x") plt.xlabel('Change in water level (x)') plt.ylabel('Water folowing out of the dam (y)') # Plot fit over the data plt.plot(X, np.dot(np.c_[np.ones(m), X], theta)) def learning_curve(X, y, Xval, yval, lmd): # Number of training examples m = X.shape[0] # You need to return these values correctly error_train = np.zeros(m) error_val = np.zeros(m) # ===================== Your Code Here ===================== # Instructions : Fill in this function to return training errors in # error_train and the cross validation errors in error_val. # i.e., error_train[i] and error_val[i] should give you # the errors obtained after training on i examples # # Note : You should evaluate the training error on the first i training # examples (i.e. X[:i] and y[:i]) # # For the cross-validation error, you should instead evaluate on # the _entire_ cross validation set (Xval and yval). # # Note : If you're using your cost function (linear_reg_cost_function) # to compute the training and cross validation error, you should # call the function with the lamdba argument set to 0. # Do note that you will still need to use lamdba when running the # training to obtain the theta parameters. # for i in range(1, m+1): theta = train_linear_reg(np.c_[np.ones(i), X[:i]], y[:i], lmd) cost, grad = linear_reg_cost_function(theta, np.c_[np.ones(i), X[:i]], y[:i], 0) error_train[i-1] = cost cost, grad = linear_reg_cost_function(theta, np.c_[np.ones(Xval.shape[0]), Xval], yval, 0) error_val[i-1] = cost # ========================================================== return error_train, error_val # ===================== Part 5: Learning Curve for Linear Regression ===================== # Next, you should implement the learning_curve function. # # Write up note : Since the model is underfitting the data, we expect to # see a graph with "high bias" -- Figure 3 in ex5.pdf # lmd = 0 # error_train, error_val = lc.learning_curve(np.c_[np.ones(m), X], y, np.c_[np.ones(Xval.shape[0]), Xval], yval, lmd) error_train, error_val = learning_curve(np.c_[np.ones(m), X], y, np.c_[np.ones(Xval.shape[0]), Xval], yval, lmd) plt.figure() plt.plot(np.arange(m), error_train, np.arange(m), error_val) plt.title('Learning Curve for Linear Regression') plt.legend(['Train', 'Cross Validation']) plt.xlabel('Number of Training Examples') plt.ylabel('Error') plt.axis([0, 13, 0, 150]) plt.show() def poly_features(X, p): # You need to return the following variable correctly. X_poly = np.zeros((X.size, p)) # ===================== Your Code Here ===================== # Instructions : Given a vector X, return a matrix X_poly where the p-th # column of X contains the values of X to the p-th power. # for i in range(p): X_poly[:, i] = np.power(X, i+1).T # ========================================================== return X_poly # ===================== Part 6 : Feature Mapping for Polynomial Regression ===================== # One solution to this is to use polynomial regression. 
You should now # complete polyFeatures to map each example into its powers # p = 5 # Map X onto Polynomial Features and Normalize # X_poly = pf.poly_features(X, p) X_poly = poly_features(X, p) X_poly, mu, sigma = fn.feature_normalize(X_poly) X_poly = np.c_[np.ones(m), X_poly] # Map X_poly_test and normalize (using mu and sigma) # X_poly_test = pf.poly_features(Xtest, p) X_poly_test = poly_features(Xtest, p) X_poly_test -= mu X_poly_test /= sigma X_poly_test = np.c_[np.ones(X_poly_test.shape[0]), X_poly_test] # Map X_poly_val and normalize (using mu and sigma) # X_poly_val = pf.poly_features(Xval, p) X_poly_val = poly_features(Xval, p) X_poly_val -= mu X_poly_val /= sigma X_poly_val = np.c_[np.ones(X_poly_val.shape[0]), X_poly_val] print('Normalized Training Example 1 : \n{}'.format(X_poly[0])) def train_linear_reg(x, y, lmd): initial_theta = np.ones(x.shape[1]) def cost_func(t): return linear_reg_cost_function(t, x, y, lmd)[0] def grad_func(t): return linear_reg_cost_function(t, x, y, lmd)[1] theta, *unused = opt.fmin_cg(cost_func, initial_theta, grad_func, maxiter=200, disp=False, full_output=True) return theta def plot_fit(min_x, max_x, mu, sigma, theta, p): x = np.arange(min_x - 15, max_x + 25, 0.05) # X_poly = pf.poly_features(x, p) X_poly = poly_features(x, p) X_poly -= mu X_poly /= sigma X_poly = np.c_[np.ones(x.size), X_poly] plt.plot(x, np.dot(X_poly, theta)) # ===================== Part 7 : Learning Curve for Polynomial Regression ===================== # Now, you will get to experiment with polynomial regression with multiple # values of lambda. The code below runs polynomial regression with # lambda = 0. You should try running the code with different values of # lambda to see how the fit and learning curve change. # lmd = 0 # theta = tlr.train_linear_reg(X_poly, y, lmd) theta = train_linear_reg(X_poly, y, lmd) # Plot trainint data and fit plt.figure() plt.scatter(X, y, c='r', marker="x") # plotft.plot_fit(np.min(X), np.max(X), mu, sigma, theta, p) plot_fit(np.min(X), np.max(X), mu, sigma, theta, p) plt.xlabel('Change in water level (x)') plt.ylabel('Water folowing out of the dam (y)') plt.ylim([0, 60]) plt.title('Polynomial Regression Fit (lambda = {})'.format(lmd)) error_train, error_val = learning_curve(X_poly, y, X_poly_val, yval, lmd) plt.figure() plt.plot(np.arange(m), error_train, np.arange(m), error_val) plt.title('Polynomial Regression Learning Curve (lambda = {})'.format(lmd)) plt.legend(['Train', 'Cross Validation']) plt.xlabel('Number of Training Examples') plt.ylabel('Error') plt.axis([0, 13, 0, 150]) print('Polynomial Regression (lambda = {})'.format(lmd)) print('# Training Examples\tTrain Error\t\tCross Validation Error') for i in range(m): print(' \t{}\t\t{}\t{}'.format(i, error_train[i], error_val[i])) # ===================== Part 7 : Learning Curve for Polynomial Regression ===================== # Now, you will get to experiment with polynomial regression with multiple # values of lambda. The code below runs polynomial regression with # lambda = 0. You should try running the code with different values of # lambda to see how the fit and learning curve change. 
# lmd = 1 # theta = tlr.train_linear_reg(X_poly, y, lmd) theta = train_linear_reg(X_poly, y, lmd) # Plot trainint data and fit plt.figure() plt.scatter(X, y, c='r', marker="x") # plotft.plot_fit(np.min(X), np.max(X), mu, sigma, theta, p) plot_fit(np.min(X), np.max(X), mu, sigma, theta, p) plt.xlabel('Change in water level (x)') plt.ylabel('Water folowing out of the dam (y)') plt.ylim([0, 60]) plt.title('Polynomial Regression Fit (lambda = {})'.format(lmd)) error_train, error_val = learning_curve(X_poly, y, X_poly_val, yval, lmd) plt.figure() plt.plot(np.arange(m), error_train, np.arange(m), error_val) plt.title('Polynomial Regression Learning Curve (lambda = {})'.format(lmd)) plt.legend(['Train', 'Cross Validation']) plt.xlabel('Number of Training Examples') plt.ylabel('Error') plt.axis([0, 13, 0, 150]) print('Polynomial Regression (lambda = {})'.format(lmd)) print('# Training Examples\tTrain Error\t\tCross Validation Error') for i in range(m): print(' \t{}\t\t{}\t{}'.format(i, error_train[i], error_val[i])) # ===================== Part 7 : Learning Curve for Polynomial Regression ===================== # Now, you will get to experiment with polynomial regression with multiple # values of lambda. The code below runs polynomial regression with # lambda = 0. You should try running the code with different values of # lambda to see how the fit and learning curve change. # lmd = 100 # theta = tlr.train_linear_reg(X_poly, y, lmd) theta = train_linear_reg(X_poly, y, lmd) # Plot trainint data and fit plt.figure() plt.scatter(X, y, c='r', marker="x") # plotft.plot_fit(np.min(X), np.max(X), mu, sigma, theta, p) plot_fit(np.min(X), np.max(X), mu, sigma, theta, p) plt.xlabel('Change in water level (x)') plt.ylabel('Water folowing out of the dam (y)') plt.ylim([0, 60]) plt.title('Polynomial Regression Fit (lambda = {})'.format(lmd)) def validation_curve(X, y, Xval, yval): # Selected values of lambda (don't change this) lambda_vec = np.array([0., 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1, 3, 10]) # You need to return these variables correctly. error_train = np.zeros(lambda_vec.size) error_val = np.zeros(lambda_vec.size) # ===================== Your Code Here ===================== # Instructions : Fill in this function to return training errors in # error_train and the validation errors in error_val. The # vector lambda_vec contains the different lambda parameters # to use for each calculation of the errors, i.e, # error_train[i], and error_val[i] should give # you the errors obtained after training with # lmd = lambda_vec[i] # for idx, lmd in enumerate(lambda_vec): e_train, e_val = learning_curve(X, y, Xval, yval, lmd) error_train[idx] = e_train[-1] error_val[idx] = e_val[-1] # ========================================================== return lambda_vec, error_train, error_val # ===================== Part 8 : Validation for Selecting Lambda ===================== # You will now implement validationCurve to test various values of # lambda on a validation set. You will then use this to select the # 'best' lambda value. # lambda_vec, error_train, error_val = vc.validation_curve(X_poly, y, X_poly_val, yval) lambda_vec, error_train, error_val = validation_curve(X_poly, y, X_poly_val, yval) plt.figure() plt.plot(lambda_vec, error_train, lambda_vec, error_val) plt.legend(['Train', 'Cross Validation']) plt.xlabel('lambda') plt.ylabel('Error') ```
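The validation curve gives a cross-validation error for every candidate lambda; the natural follow-up is to pick the lambda with the lowest validation error and report the error on the held-out test set. A short hedged sketch of that selection is below, reusing the arrays computed above (variable names follow the notebook; this closing step is a common addition, not something the cells above already perform).

```
# Pick the lambda with the smallest cross-validation error and evaluate it on
# the held-out test set; reuses lambda_vec, error_val, X_poly, X_poly_test, ytest.
best_idx = np.argmin(error_val)
best_lmd = lambda_vec[best_idx]
theta_best = train_linear_reg(X_poly, y, best_lmd)
test_error, _ = linear_reg_cost_function(theta_best, X_poly_test, ytest, 0)
print('Best lambda from the validation curve: {}'.format(best_lmd))
print('Test-set error at that lambda: {:0.6f}'.format(test_error))
```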
``` 100+ Python challenging programming exercises 1. Level description Level Description Level 1 Beginner means someone who has just gone through an introductory Python course. He can solve some problems with 1 or 2 Python classes or functions. Normally, the answers could directly be found in the textbooks. Level 2 Intermediate means someone who has just learned Python, but already has a relatively strong programming background from before. He should be able to solve problems which may involve 3 or 3 Python classes or functions. The answers cannot be directly be found in the textbooks. Level 3 Advanced. He should use Python to solve more complex problem using more rich libraries functions and data structures and algorithms. He is supposed to solve the problem using several Python standard packages and advanced techniques. 2. Problem template #----------------------------------------# Question Hints Solution 3. Questions #----------------------------------------# Question 1 Level 1 Question: Write a program which will find all such numbers which are divisible by 7 but are not a multiple of 5, between 2000 and 3200 (both included). The numbers obtained should be printed in a comma-separated sequence on a single line. Hints: Consider use range(#begin, #end) method Solution: l=[] for i in range(2000, 3201): if (i%7==0) and (i%5!=0): l.append(str(i)) print ','.join(l) #----------------------------------------# #----------------------------------------# Question 2 Level 1 Question: Write a program which can compute the factorial of a given numbers. The results should be printed in a comma-separated sequence on a single line. Suppose the following input is supplied to the program: 8 Then, the output should be: 40320 Hints: In case of input data being supplied to the question, it should be assumed to be a console input. Solution: def fact(x): if x == 0: return 1 return x * fact(x - 1) x=int(raw_input()) print fact(x) #----------------------------------------# #----------------------------------------# Question 3 Level 1 Question: With a given integral number n, write a program to generate a dictionary that contains (i, i*i) such that is an integral number between 1 and n (both included). and then the program should print the dictionary. Suppose the following input is supplied to the program: 8 Then, the output should be: {1: 1, 2: 4, 3: 9, 4: 16, 5: 25, 6: 36, 7: 49, 8: 64} Hints: In case of input data being supplied to the question, it should be assumed to be a console input. Consider use dict() Solution: n=int(raw_input()) d=dict() for i in range(1,n+1): d[i]=i*i print d #----------------------------------------# #----------------------------------------# Question 4 Level 1 Question: Write a program which accepts a sequence of comma-separated numbers from console and generate a list and a tuple which contains every number. Suppose the following input is supplied to the program: 34,67,55,33,12,98 Then, the output should be: ['34', '67', '55', '33', '12', '98'] ('34', '67', '55', '33', '12', '98') Hints: In case of input data being supplied to the question, it should be assumed to be a console input. tuple() method can convert list to tuple Solution: values=raw_input() l=values.split(",") t=tuple(l) print l print t #----------------------------------------# #----------------------------------------# Question 5 Level 1 Question: Define a class which has at least two methods: getString: to get a string from console input printString: to print the string in upper case. 
Also please include simple test function to test the class methods. Hints: Use __init__ method to construct some parameters Solution: class InputOutString(object): def __init__(self): self.s = "" def getString(self): self.s = raw_input() def printString(self): print self.s.upper() strObj = InputOutString() strObj.getString() strObj.printString() #----------------------------------------# #----------------------------------------# Question 6 Level 2 Question: Write a program that calculates and prints the value according to the given formula: Q = Square root of [(2 * C * D)/H] Following are the fixed values of C and H: C is 50. H is 30. D is the variable whose values should be input to your program in a comma-separated sequence. Example Let us assume the following comma separated input sequence is given to the program: 100,150,180 The output of the program should be: 18,22,24 Hints: If the output received is in decimal form, it should be rounded off to its nearest value (for example, if the output received is 26.0, it should be printed as 26) In case of input data being supplied to the question, it should be assumed to be a console input. Solution: #!/usr/bin/env python import math c=50 h=30 value = [] items=[x for x in raw_input().split(',')] for d in items: value.append(str(int(round(math.sqrt(2*c*float(d)/h))))) print ','.join(value) #----------------------------------------# #----------------------------------------# Question 7 Level 2 Question: Write a program which takes 2 digits, X,Y as input and generates a 2-dimensional array. The element value in the i-th row and j-th column of the array should be i*j. Note: i=0,1.., X-1; j=0,1,¡­Y-1. Example Suppose the following inputs are given to the program: 3,5 Then, the output of the program should be: [[0, 0, 0, 0, 0], [0, 1, 2, 3, 4], [0, 2, 4, 6, 8]] Hints: Note: In case of input data being supplied to the question, it should be assumed to be a console input in a comma-separated form. Solution: input_str = raw_input() dimensions=[int(x) for x in input_str.split(',')] rowNum=dimensions[0] colNum=dimensions[1] multilist = [[0 for col in range(colNum)] for row in range(rowNum)] for row in range(rowNum): for col in range(colNum): multilist[row][col]= row*col print multilist #----------------------------------------# #----------------------------------------# Question 8 Level 2 Question: Write a program that accepts a comma separated sequence of words as input and prints the words in a comma-separated sequence after sorting them alphabetically. Suppose the following input is supplied to the program: without,hello,bag,world Then, the output should be: bag,hello,without,world Hints: In case of input data being supplied to the question, it should be assumed to be a console input. Solution: items=[x for x in raw_input().split(',')] items.sort() print ','.join(items) #----------------------------------------# #----------------------------------------# Question 9 Level 2 Question£º Write a program that accepts sequence of lines as input and prints the lines after making all characters in the sentence capitalized. Suppose the following input is supplied to the program: Hello world Practice makes perfect Then, the output should be: HELLO WORLD PRACTICE MAKES PERFECT Hints: In case of input data being supplied to the question, it should be assumed to be a console input. 
Solution: lines = [] while True: s = raw_input() if s: lines.append(s.upper()) else: break; for sentence in lines: print sentence #----------------------------------------# #----------------------------------------# Question 10 Level 2 Question: Write a program that accepts a sequence of whitespace separated words as input and prints the words after removing all duplicate words and sorting them alphanumerically. Suppose the following input is supplied to the program: hello world and practice makes perfect and hello world again Then, the output should be: again and hello makes perfect practice world Hints: In case of input data being supplied to the question, it should be assumed to be a console input. We use set container to remove duplicated data automatically and then use sorted() to sort the data. Solution: s = raw_input() words = [word for word in s.split(" ")] print " ".join(sorted(list(set(words)))) #----------------------------------------# #----------------------------------------# Question 11 Level 2 Question: Write a program which accepts a sequence of comma separated 4 digit binary numbers as its input and then check whether they are divisible by 5 or not. The numbers that are divisible by 5 are to be printed in a comma separated sequence. Example: 0100,0011,1010,1001 Then the output should be: 1010 Notes: Assume the data is input by console. Hints: In case of input data being supplied to the question, it should be assumed to be a console input. Solution: value = [] items=[x for x in raw_input().split(',')] for p in items: intp = int(p, 2) if not intp%5: value.append(p) print ','.join(value) #----------------------------------------# #----------------------------------------# Question 12 Level 2 Question: Write a program, which will find all such numbers between 1000 and 3000 (both included) such that each digit of the number is an even number. The numbers obtained should be printed in a comma-separated sequence on a single line. Hints: In case of input data being supplied to the question, it should be assumed to be a console input. Solution: values = [] for i in range(1000, 3001): s = str(i) if (int(s[0])%2==0) and (int(s[1])%2==0) and (int(s[2])%2==0) and (int(s[3])%2==0): values.append(s) print ",".join(values) #----------------------------------------# #----------------------------------------# Question 13 Level 2 Question: Write a program that accepts a sentence and calculate the number of letters and digits. Suppose the following input is supplied to the program: hello world! 123 Then, the output should be: LETTERS 10 DIGITS 3 Hints: In case of input data being supplied to the question, it should be assumed to be a console input. Solution: s = raw_input() d={"DIGITS":0, "LETTERS":0} for c in s: if c.isdigit(): d["DIGITS"]+=1 elif c.isalpha(): d["LETTERS"]+=1 else: pass print "LETTERS", d["LETTERS"] print "DIGITS", d["DIGITS"] #----------------------------------------# #----------------------------------------# Question 14 Level 2 Question: Write a program that accepts a sentence and calculate the number of upper case letters and lower case letters. Suppose the following input is supplied to the program: Hello world! Then, the output should be: UPPER CASE 1 LOWER CASE 9 Hints: In case of input data being supplied to the question, it should be assumed to be a console input. 
Solution: s = raw_input() d={"UPPER CASE":0, "LOWER CASE":0} for c in s: if c.isupper(): d["UPPER CASE"]+=1 elif c.islower(): d["LOWER CASE"]+=1 else: pass print "UPPER CASE", d["UPPER CASE"] print "LOWER CASE", d["LOWER CASE"] #----------------------------------------# #----------------------------------------# Question 15 Level 2 Question: Write a program that computes the value of a+aa+aaa+aaaa with a given digit as the value of a. Suppose the following input is supplied to the program: 9 Then, the output should be: 11106 Hints: In case of input data being supplied to the question, it should be assumed to be a console input. Solution: a = raw_input() n1 = int( "%s" % a ) n2 = int( "%s%s" % (a,a) ) n3 = int( "%s%s%s" % (a,a,a) ) n4 = int( "%s%s%s%s" % (a,a,a,a) ) print n1+n2+n3+n4 #----------------------------------------# #----------------------------------------# Question 16 Level 2 Question: Use a list comprehension to square each odd number in a list. The list is input by a sequence of comma-separated numbers. Suppose the following input is supplied to the program: 1,2,3,4,5,6,7,8,9 Then, the output should be: 1,3,5,7,9 Hints: In case of input data being supplied to the question, it should be assumed to be a console input. Solution: values = raw_input() numbers = [x for x in values.split(",") if int(x)%2!=0] print ",".join(numbers) #----------------------------------------# Question 17 Level 2 Question: Write a program that computes the net amount of a bank account based a transaction log from console input. The transaction log format is shown as following: D 100 W 200 D means deposit while W means withdrawal. Suppose the following input is supplied to the program: D 300 D 300 W 200 D 100 Then, the output should be: 500 Hints: In case of input data being supplied to the question, it should be assumed to be a console input. Solution: netAmount = 0 while True: s = raw_input() if not s: break values = s.split(" ") operation = values[0] amount = int(values[1]) if operation=="D": netAmount+=amount elif operation=="W": netAmount-=amount else: pass print netAmount #----------------------------------------# #----------------------------------------# Question 18 Level 3 Question: A website requires the users to input username and password to register. Write a program to check the validity of password input by users. Following are the criteria for checking the password: 1. At least 1 letter between [a-z] 2. At least 1 number between [0-9] 1. At least 1 letter between [A-Z] 3. At least 1 character from [$#@] 4. Minimum length of transaction password: 6 5. Maximum length of transaction password: 12 Your program should accept a sequence of comma separated passwords and will check them according to the above criteria. Passwords that match the criteria are to be printed, each separated by a comma. Example If the following passwords are given as input to the program: ABd1234@1,a F1#,2w3E*,2We3345 Then, the output of the program should be: ABd1234@1 Hints: In case of input data being supplied to the question, it should be assumed to be a console input. 
Solutions: import re value = [] items=[x for x in raw_input().split(',')] for p in items: if len(p)<6 or len(p)>12: continue else: pass if not re.search("[a-z]",p): continue elif not re.search("[0-9]",p): continue elif not re.search("[A-Z]",p): continue elif not re.search("[$#@]",p): continue elif re.search("\s",p): continue else: pass value.append(p) print ",".join(value) #----------------------------------------# #----------------------------------------# Question 19 Level 3 Question: You are required to write a program to sort the (name, age, height) tuples by ascending order where name is string, age and height are numbers. The tuples are input by console. The sort criteria is: 1: Sort based on name; 2: Then sort based on age; 3: Then sort by score. The priority is that name > age > score. If the following tuples are given as input to the program: Tom,19,80 John,20,90 Jony,17,91 Jony,17,93 Json,21,85 Then, the output of the program should be: [('John', '20', '90'), ('Jony', '17', '91'), ('Jony', '17', '93'), ('Json', '21', '85'), ('Tom', '19', '80')] Hints: In case of input data being supplied to the question, it should be assumed to be a console input. We use itemgetter to enable multiple sort keys. Solutions: from operator import itemgetter, attrgetter l = [] while True: s = raw_input() if not s: break l.append(tuple(s.split(","))) print sorted(l, key=itemgetter(0,1,2)) #----------------------------------------# #----------------------------------------# Question 20 Level 3 Question: Define a class with a generator which can iterate the numbers, which are divisible by 7, between a given range 0 and n. Hints: Consider use yield Solution: def putNumbers(n): i = 0 while i<n: j=i i=i+1 if j%7==0: yield j for i in reverse(100): print i #----------------------------------------# #----------------------------------------# Question 21 Level 3 Question£º A robot moves in a plane starting from the original point (0,0). The robot can move toward UP, DOWN, LEFT and RIGHT with a given steps. The trace of robot movement is shown as the following: UP 5 DOWN 3 LEFT 3 RIGHT 2 ¡­ The numbers after the direction are steps. Please write a program to compute the distance from current position after a sequence of movement and original point. If the distance is a float, then just print the nearest integer. Example: If the following tuples are given as input to the program: UP 5 DOWN 3 LEFT 3 RIGHT 2 Then, the output of the program should be: 2 Hints: In case of input data being supplied to the question, it should be assumed to be a console input. Solution: import math pos = [0,0] while True: s = raw_input() if not s: break movement = s.split(" ") direction = movement[0] steps = int(movement[1]) if direction=="UP": pos[0]+=steps elif direction=="DOWN": pos[0]-=steps elif direction=="LEFT": pos[1]-=steps elif direction=="RIGHT": pos[1]+=steps else: pass print int(round(math.sqrt(pos[1]**2+pos[0]**2))) #----------------------------------------# #----------------------------------------# Question 22 Level 3 Question: Write a program to compute the frequency of the words from the input. The output should output after sorting the key alphanumerically. Suppose the following input is supplied to the program: New to Python or choosing between Python 2 and Python 3? Read Python 2 or Python 3. Then, the output should be: 2:2 3.:1 3?:1 New:1 Python:5 Read:1 and:1 between:1 choosing:1 or:2 to:1 Hints In case of input data being supplied to the question, it should be assumed to be a console input. 
Solution: freq = {} # frequency of words in text line = raw_input() for word in line.split(): freq[word] = freq.get(word,0)+1 words = freq.keys() words.sort() for w in words: print "%s:%d" % (w,freq[w]) #----------------------------------------# #----------------------------------------# Question 23 level 1 Question: Write a method which can calculate square value of number Hints: Using the ** operator Solution: def square(num): return num ** 2 print square(2) print square(3) #----------------------------------------# #----------------------------------------# Question 24 Level 1 Question: Python has many built-in functions, and if you do not know how to use it, you can read document online or find some books. But Python has a built-in document function for every built-in functions. Please write a program to print some Python built-in functions documents, such as abs(), int(), raw_input() And add document for your own function Hints: The built-in document method is __doc__ Solution: print abs.__doc__ print int.__doc__ print raw_input.__doc__ def square(num): '''Return the square value of the input number. The input number must be integer. ''' return num ** 2 print square(2) print square.__doc__ #----------------------------------------# #----------------------------------------# Question 25 Level 1 Question: Define a class, which have a class parameter and have a same instance parameter. Hints: Define a instance parameter, need add it in __init__ method You can init a object with construct parameter or set the value later Solution: class Person: # Define the class parameter "name" name = "Person" def __init__(self, name = None): # self.name is the instance parameter self.name = name jeffrey = Person("Jeffrey") print "%s name is %s" % (Person.name, jeffrey.name) nico = Person() nico.name = "Nico" print "%s name is %s" % (Person.name, nico.name) #----------------------------------------# #----------------------------------------# Question: Define a function which can compute the sum of two numbers. Hints: Define a function with two numbers as arguments. You can compute the sum in the function and return the value. Solution def SumFunction(number1, number2): return number1+number2 print SumFunction(1,2) #----------------------------------------# Question: Define a function that can convert a integer into a string and print it in console. Hints: Use str() to convert a number to string. Solution def printValue(n): print str(n) printValue(3) #----------------------------------------# Question: Define a function that can convert a integer into a string and print it in console. Hints: Use str() to convert a number to string. Solution def printValue(n): print str(n) printValue(3) #----------------------------------------# 2.10 Question: Define a function that can receive two integral numbers in string form and compute their sum and then print it in console. Hints: Use int() to convert a string to integer. Solution def printValue(s1,s2): print int(s1)+int(s2) printValue("3","4") #7 #----------------------------------------# 2.10 Question: Define a function that can accept two strings as input and concatenate them and then print it in console. Hints: Use + to concatenate the strings Solution def printValue(s1,s2): print s1+s2 printValue("3","4") #34 #----------------------------------------# 2.10 Question: Define a function that can accept two strings as input and print the string with maximum length in console. 
If two strings have the same length, then the function should print al l strings line by line. Hints: Use len() function to get the length of a string Solution def printValue(s1,s2): len1 = len(s1) len2 = len(s2) if len1>len2: print s1 elif len2>len1: print s2 else: print s1 print s2 printValue("one","three") #----------------------------------------# 2.10 Question: Define a function that can accept an integer number as input and print the "It is an even number" if the number is even, otherwise print "It is an odd number". Hints: Use % operator to check if a number is even or odd. Solution def checkValue(n): if n%2 == 0: print "It is an even number" else: print "It is an odd number" checkValue(7) #----------------------------------------# 2.10 Question: Define a function which can print a dictionary where the keys are numbers between 1 and 3 (both included) and the values are square of keys. Hints: Use dict[key]=value pattern to put entry into a dictionary. Use ** operator to get power of a number. Solution def printDict(): d=dict() d[1]=1 d[2]=2**2 d[3]=3**2 print d printDict() #----------------------------------------# 2.10 Question: Define a function which can print a dictionary where the keys are numbers between 1 and 20 (both included) and the values are square of keys. Hints: Use dict[key]=value pattern to put entry into a dictionary. Use ** operator to get power of a number. Use range() for loops. Solution def printDict(): d=dict() for i in range(1,21): d[i]=i**2 print d printDict() #----------------------------------------# 2.10 Question: Define a function which can generate a dictionary where the keys are numbers between 1 and 20 (both included) and the values are square of keys. The function should just print the values only. Hints: Use dict[key]=value pattern to put entry into a dictionary. Use ** operator to get power of a number. Use range() for loops. Use keys() to iterate keys in the dictionary. Also we can use item() to get key/value pairs. Solution def printDict(): d=dict() for i in range(1,21): d[i]=i**2 for (k,v) in d.items(): print v printDict() #----------------------------------------# 2.10 Question: Define a function which can generate a dictionary where the keys are numbers between 1 and 20 (both included) and the values are square of keys. The function should just print the keys only. Hints: Use dict[key]=value pattern to put entry into a dictionary. Use ** operator to get power of a number. Use range() for loops. Use keys() to iterate keys in the dictionary. Also we can use item() to get key/value pairs. Solution def printDict(): d=dict() for i in range(1,21): d[i]=i**2 for k in d.keys(): print k printDict() #----------------------------------------# 2.10 Question: Define a function which can generate and print a list where the values are square of numbers between 1 and 20 (both included). Hints: Use ** operator to get power of a number. Use range() for loops. Use list.append() to add values into a list. Solution def printList(): li=list() for i in range(1,21): li.append(i**2) print li printList() #----------------------------------------# 2.10 Question: Define a function which can generate a list where the values are square of numbers between 1 and 20 (both included). Then the function needs to print the first 5 elements in the list. Hints: Use ** operator to get power of a number. Use range() for loops. Use list.append() to add values into a list. 
Use [n1:n2] to slice a list Solution def printList(): li=list() for i in range(1,21): li.append(i**2) print li[:5] printList() #----------------------------------------# 2.10 Question: Define a function which can generate a list where the values are square of numbers between 1 and 20 (both included). Then the function needs to print the last 5 elements in the list. Hints: Use ** operator to get power of a number. Use range() for loops. Use list.append() to add values into a list. Use [n1:n2] to slice a list Solution def printList(): li=list() for i in range(1,21): li.append(i**2) print li[-5:] printList() #----------------------------------------# 2.10 Question: Define a function which can generate a list where the values are square of numbers between 1 and 20 (both included). Then the function needs to print all values except the first 5 elements in the list. Hints: Use ** operator to get power of a number. Use range() for loops. Use list.append() to add values into a list. Use [n1:n2] to slice a list Solution def printList(): li=list() for i in range(1,21): li.append(i**2) print li[5:] printList() #----------------------------------------# 2.10 Question: Define a function which can generate and print a tuple where the value are square of numbers between 1 and 20 (both included). Hints: Use ** operator to get power of a number. Use range() for loops. Use list.append() to add values into a list. Use tuple() to get a tuple from a list. Solution def printTuple(): li=list() for i in range(1,21): li.append(i**2) print tuple(li) printTuple() #----------------------------------------# 2.10 Question: With a given tuple (1,2,3,4,5,6,7,8,9,10), write a program to print the first half values in one line and the last half values in one line. Hints: Use [n1:n2] notation to get a slice from a tuple. Solution tp=(1,2,3,4,5,6,7,8,9,10) tp1=tp[:5] tp2=tp[5:] print tp1 print tp2 #----------------------------------------# 2.10 Question: Write a program to generate and print another tuple whose values are even numbers in the given tuple (1,2,3,4,5,6,7,8,9,10). Hints: Use "for" to iterate the tuple Use tuple() to generate a tuple from a list. Solution tp=(1,2,3,4,5,6,7,8,9,10) li=list() for i in tp: if tp[i]%2==0: li.append(tp[i]) tp2=tuple(li) print tp2 #----------------------------------------# 2.14 Question: Write a program which accepts a string as input to print "Yes" if the string is "yes" or "YES" or "Yes", otherwise print "No". Hints: Use if statement to judge condition. Solution s= raw_input() if s=="yes" or s=="YES" or s=="Yes": print "Yes" else: print "No" #----------------------------------------# 3.4 Question: Write a program which can filter even numbers in a list by using filter function. The list is: [1,2,3,4,5,6,7,8,9,10]. Hints: Use filter() to filter some elements in a list. Use lambda to define anonymous functions. Solution li = [1,2,3,4,5,6,7,8,9,10] evenNumbers = filter(lambda x: x%2==0, li) print evenNumbers #----------------------------------------# 3.4 Question: Write a program which can map() to make a list whose elements are square of elements in [1,2,3,4,5,6,7,8,9,10]. Hints: Use map() to generate a list. Use lambda to define anonymous functions. Solution li = [1,2,3,4,5,6,7,8,9,10] squaredNumbers = map(lambda x: x**2, li) print squaredNumbers #----------------------------------------# 3.5 Question: Write a program which can map() and filter() to make a list whose elements are square of even number in [1,2,3,4,5,6,7,8,9,10]. Hints: Use map() to generate a list. 
Use filter() to filter elements of a list. Use lambda to define anonymous functions. Solution li = [1,2,3,4,5,6,7,8,9,10] evenNumbers = map(lambda x: x**2, filter(lambda x: x%2==0, li)) print evenNumbers #----------------------------------------# 3.5 Question: Write a program which can filter() to make a list whose elements are even number between 1 and 20 (both included). Hints: Use filter() to filter elements of a list. Use lambda to define anonymous functions. Solution evenNumbers = filter(lambda x: x%2==0, range(1,21)) print evenNumbers #----------------------------------------# 3.5 Question: Write a program which can map() to make a list whose elements are square of numbers between 1 and 20 (both included). Hints: Use map() to generate a list. Use lambda to define anonymous functions. Solution squaredNumbers = map(lambda x: x**2, range(1,21)) print squaredNumbers #----------------------------------------# 7.2 Question: Define a class named American which has a static method called printNationality. Hints: Use @staticmethod decorator to define class static method. Solution class American(object): @staticmethod def printNationality(): print "America" anAmerican = American() anAmerican.printNationality() American.printNationality() #----------------------------------------# 7.2 Question: Define a class named American and its subclass NewYorker. Hints: Use class Subclass(ParentClass) to define a subclass. Solution: class American(object): pass class NewYorker(American): pass anAmerican = American() aNewYorker = NewYorker() print anAmerican print aNewYorker #----------------------------------------# 7.2 Question: Define a class named Circle which can be constructed by a radius. The Circle class has a method which can compute the area. Hints: Use def methodName(self) to define a method. Solution: class Circle(object): def __init__(self, r): self.radius = r def area(self): return self.radius**2*3.14 aCircle = Circle(2) print aCircle.area() #----------------------------------------# 7.2 Define a class named Rectangle which can be constructed by a length and width. The Rectangle class has a method which can compute the area. Hints: Use def methodName(self) to define a method. Solution: class Rectangle(object): def __init__(self, l, w): self.length = l self.width = w def area(self): return self.length*self.width aRectangle = Rectangle(2,10) print aRectangle.area() #----------------------------------------# 7.2 Define a class named Shape and its subclass Square. The Square class has an init function which takes a length as argument. Both classes have a area function which can print the area of the shape where Shape's area is 0 by default. Hints: To override a method in super class, we can define a method with the same name in the super class. Solution: class Shape(object): def __init__(self): pass def area(self): return 0 class Square(Shape): def __init__(self, l): Shape.__init__(self) self.length = l def area(self): return self.length*self.length aSquare= Square(3) print aSquare.area() #----------------------------------------# Please raise a RuntimeError exception. Hints: Use raise() to raise an exception. Solution: raise RuntimeError('something wrong') #----------------------------------------# Write a function to compute 5/0 and use try/except to catch the exceptions. Hints: Use try/except to catch exceptions. Solution: def throws(): return 5/0 try: throws() except ZeroDivisionError: print "division by zero!" 
except Exception, err: print 'Caught an exception' finally: print 'In finally block for cleanup' #----------------------------------------# Define a custom exception class which takes a string message as attribute. Hints: To define a custom exception, we need to define a class inherited from Exception. Solution: class MyError(Exception): """My own exception class Attributes: msg -- explanation of the error """ def __init__(self, msg): self.msg = msg error = MyError("something wrong") #----------------------------------------# Question: Assuming that we have some email addresses in the "[email protected]" format, please write program to print the user name of a given email address. Both user names and company names are composed of letters only. Example: If the following email address is given as input to the program: [email protected] Then, the output of the program should be: john In case of input data being supplied to the question, it should be assumed to be a console input. Hints: Use \w to match letters. Solution: import re emailAddress = raw_input() pat2 = "(\w+)@((\w+\.)+(com))" r2 = re.match(pat2,emailAddress) print r2.group(1) #----------------------------------------# Question: Assuming that we have some email addresses in the "[email protected]" format, please write program to print the company name of a given email address. Both user names and company names are composed of letters only. Example: If the following email address is given as input to the program: [email protected] Then, the output of the program should be: google In case of input data being supplied to the question, it should be assumed to be a console input. Hints: Use \w to match letters. Solution: import re emailAddress = raw_input() pat2 = "(\w+)@(\w+)\.(com)" r2 = re.match(pat2,emailAddress) print r2.group(2) #----------------------------------------# Question: Write a program which accepts a sequence of words separated by whitespace as input to print the words composed of digits only. Example: If the following words is given as input to the program: 2 cats and 3 dogs. Then, the output of the program should be: ['2', '3'] In case of input data being supplied to the question, it should be assumed to be a console input. Hints: Use re.findall() to find all substring using regex. Solution: import re s = raw_input() print re.findall("\d+",s) #----------------------------------------# Question: Print a unicode string "hello world". Hints: Use u'strings' format to define unicode string. Solution: unicodeString = u"hello world!" print unicodeString #----------------------------------------# Write a program to read an ASCII string and to convert it to a unicode string encoded by utf-8. Hints: Use unicode() function to convert. Solution: s = raw_input() u = unicode( s ,"utf-8") print u #----------------------------------------# Question: Write a special comment to indicate a Python source code file is in unicode. Hints: Solution: # -*- coding: utf-8 -*- #----------------------------------------# Question: Write a program to compute 1/2+2/3+3/4+...+n/n+1 with a given n input by console (n>0). Example: If the following n is given as input to the program: 5 Then, the output of the program should be: 3.55 In case of input data being supplied to the question, it should be assumed to be a console input. 
Hints: Use float() to convert an integer to a float Solution: n=int(raw_input()) sum=0.0 for i in range(1,n+1): sum += float(float(i)/(i+1)) print sum #----------------------------------------# Question: Write a program to compute: f(n)=f(n-1)+100 when n>0 and f(0)=1 with a given n input by console (n>0). Example: If the following n is given as input to the program: 5 Then, the output of the program should be: 500 In case of input data being supplied to the question, it should be assumed to be a console input. Hints: We can define recursive function in Python. Solution: def f(n): if n==0: return 0 else: return f(n-1)+100 n=int(raw_input()) print f(n) #----------------------------------------# Question: The Fibonacci Sequence is computed based on the following formula: f(n)=0 if n=0 f(n)=1 if n=1 f(n)=f(n-1)+f(n-2) if n>1 Please write a program to compute the value of f(n) with a given n input by console. Example: If the following n is given as input to the program: 7 Then, the output of the program should be: 13 In case of input data being supplied to the question, it should be assumed to be a console input. Hints: We can define recursive function in Python. Solution: def f(n): if n == 0: return 0 elif n == 1: return 1 else: return f(n-1)+f(n-2) n=int(raw_input()) print f(n) #----------------------------------------# #----------------------------------------# Question: The Fibonacci Sequence is computed based on the following formula: f(n)=0 if n=0 f(n)=1 if n=1 f(n)=f(n-1)+f(n-2) if n>1 Please write a program using list comprehension to print the Fibonacci Sequence in comma separated form with a given n input by console. Example: If the following n is given as input to the program: 7 Then, the output of the program should be: 0,1,1,2,3,5,8,13 Hints: We can define recursive function in Python. Use list comprehension to generate a list from an existing list. Use string.join() to join a list of strings. In case of input data being supplied to the question, it should be assumed to be a console input. Solution: def f(n): if n == 0: return 0 elif n == 1: return 1 else: return f(n-1)+f(n-2) n=int(raw_input()) values = [str(f(x)) for x in range(0, n+1)] print ",".join(values) #----------------------------------------# Question: Please write a program using generator to print the even numbers between 0 and n in comma separated form while n is input by console. Example: If the following n is given as input to the program: 10 Then, the output of the program should be: 0,2,4,6,8,10 Hints: Use yield to produce the next value in generator. In case of input data being supplied to the question, it should be assumed to be a console input. Solution: def EvenGenerator(n): i=0 while i<=n: if i%2==0: yield i i+=1 n=int(raw_input()) values = [] for i in EvenGenerator(n): values.append(str(i)) print ",".join(values) #----------------------------------------# Question: Please write a program using generator to print the numbers which can be divisible by 5 and 7 between 0 and n in comma separated form while n is input by console. Example: If the following n is given as input to the program: 100 Then, the output of the program should be: 0,35,70 Hints: Use yield to produce the next value in generator. In case of input data being supplied to the question, it should be assumed to be a console input. 
Solution: def NumGenerator(n): for i in range(n+1): if i%5==0 and i%7==0: yield i n=int(raw_input()) values = [] for i in NumGenerator(n): values.append(str(i)) print ",".join(values) #----------------------------------------# Question: Please write assert statements to verify that every number in the list [2,4,6,8] is even. Hints: Use "assert expression" to make assertion. Solution: li = [2,4,6,8] for i in li: assert i%2==0 #----------------------------------------# Question: Please write a program which accepts basic mathematic expression from console and print the evaluation result. Example: If the following string is given as input to the program: 35+3 Then, the output of the program should be: 38 Hints: Use eval() to evaluate an expression. Solution: expression = raw_input() print eval(expression) #----------------------------------------# Question: Please write a binary search function which searches an item in a sorted list. The function should return the index of element to be searched in the list. Hints: Use if/elif to deal with conditions. Solution: import math def bin_search(li, element): bottom = 0 top = len(li)-1 index = -1 while top>=bottom and index==-1: mid = int(math.floor((top+bottom)/2.0)) if li[mid]==element: index = mid elif li[mid]>element: top = mid-1 else: bottom = mid+1 return index li=[2,5,7,9,11,17,222] print bin_search(li,11) print bin_search(li,12) #----------------------------------------# Question: Please write a binary search function which searches an item in a sorted list. The function should return the index of element to be searched in the list. Hints: Use if/elif to deal with conditions. Solution: import math def bin_search(li, element): bottom = 0 top = len(li)-1 index = -1 while top>=bottom and index==-1: mid = int(math.floor((top+bottom)/2.0)) if li[mid]==element: index = mid elif li[mid]>element: top = mid-1 else: bottom = mid+1 return index li=[2,5,7,9,11,17,222] print bin_search(li,11) print bin_search(li,12) #----------------------------------------# Question: Please generate a random float where the value is between 10 and 100 using Python math module. Hints: Use random.random() to generate a random float in [0,1]. Solution: import random print random.random()*100 #----------------------------------------# Question: Please generate a random float where the value is between 5 and 95 using Python math module. Hints: Use random.random() to generate a random float in [0,1]. Solution: import random print random.random()*100-5 #----------------------------------------# Question: Please write a program to output a random even number between 0 and 10 inclusive using random module and list comprehension. Hints: Use random.choice() to a random element from a list. Solution: import random print random.choice([i for i in range(11) if i%2==0]) #----------------------------------------# Question: Please write a program to output a random number, which is divisible by 5 and 7, between 0 and 10 inclusive using random module and list comprehension. Hints: Use random.choice() to a random element from a list. Solution: import random print random.choice([i for i in range(201) if i%5==0 and i%7==0]) #----------------------------------------# Question: Please write a program to generate a list with 5 random numbers between 100 and 200 inclusive. Hints: Use random.sample() to generate a list of random values. 
Solution: import random print random.sample(range(100), 5) #----------------------------------------# Question: Please write a program to randomly generate a list with 5 even numbers between 100 and 200 inclusive. Hints: Use random.sample() to generate a list of random values. Solution: import random print random.sample([i for i in range(100,201) if i%2==0], 5) #----------------------------------------# Question: Please write a program to randomly generate a list with 5 numbers, which are divisible by 5 and 7 , between 1 and 1000 inclusive. Hints: Use random.sample() to generate a list of random values. Solution: import random print random.sample([i for i in range(1,1001) if i%5==0 and i%7==0], 5) #----------------------------------------# Question: Please write a program to randomly print a integer number between 7 and 15 inclusive. Hints: Use random.randrange() to a random integer in a given range. Solution: import random print random.randrange(7,16) #----------------------------------------# Question: Please write a program to compress and decompress the string "hello world!hello world!hello world!hello world!". Hints: Use zlib.compress() and zlib.decompress() to compress and decompress a string. Solution: import zlib s = 'hello world!hello world!hello world!hello world!' t = zlib.compress(s) print t print zlib.decompress(t) #----------------------------------------# Question: Please write a program to print the running time of execution of "1+1" for 100 times. Hints: Use timeit() function to measure the running time. Solution: from timeit import Timer t = Timer("for i in range(100):1+1") print t.timeit() #----------------------------------------# Question: Please write a program to shuffle and print the list [3,6,7,8]. Hints: Use shuffle() function to shuffle a list. Solution: from random import shuffle li = [3,6,7,8] shuffle(li) print li #----------------------------------------# Question: Please write a program to shuffle and print the list [3,6,7,8]. Hints: Use shuffle() function to shuffle a list. Solution: from random import shuffle li = [3,6,7,8] shuffle(li) print li #----------------------------------------# Question: Please write a program to generate all sentences where subject is in ["I", "You"] and verb is in ["Play", "Love"] and the object is in ["Hockey","Football"]. Hints: Use list[index] notation to get a element from a list. Solution: subjects=["I", "You"] verbs=["Play", "Love"] objects=["Hockey","Football"] for i in range(len(subjects)): for j in range(len(verbs)): for k in range(len(objects)): sentence = "%s %s %s." % (subjects[i], verbs[j], objects[k]) print sentence #----------------------------------------# Please write a program to print the list after removing delete even numbers in [5,6,77,45,22,12,24]. Hints: Use list comprehension to delete a bunch of element from a list. Solution: li = [5,6,77,45,22,12,24] li = [x for x in li if x%2!=0] print li #----------------------------------------# Question: By using list comprehension, please write a program to print the list after removing delete numbers which are divisible by 5 and 7 in [12,24,35,70,88,120,155]. Hints: Use list comprehension to delete a bunch of element from a list. Solution: li = [12,24,35,70,88,120,155] li = [x for x in li if x%5!=0 and x%7!=0] print li #----------------------------------------# Question: By using list comprehension, please write a program to print the list after removing the 0th, 2nd, 4th,6th numbers in [12,24,35,70,88,120,155]. 
Hints: Use list comprehension to delete a bunch of element from a list. Use enumerate() to get (index, value) tuple. Solution: li = [12,24,35,70,88,120,155] li = [x for (i,x) in enumerate(li) if i%2!=0] print li #----------------------------------------# Question: By using list comprehension, please write a program generate a 3*5*8 3D array whose each element is 0. Hints: Use list comprehension to make an array. Solution: array = [[ [0 for col in range(8)] for col in range(5)] for row in range(3)] print array #----------------------------------------# Question: By using list comprehension, please write a program to print the list after removing the 0th,4th,5th numbers in [12,24,35,70,88,120,155]. Hints: Use list comprehension to delete a bunch of element from a list. Use enumerate() to get (index, value) tuple. Solution: li = [12,24,35,70,88,120,155] li = [x for (i,x) in enumerate(li) if i not in (0,4,5)] print li #----------------------------------------# Question: By using list comprehension, please write a program to print the list after removing the value 24 in [12,24,35,24,88,120,155]. Hints: Use list's remove method to delete a value. Solution: li = [12,24,35,24,88,120,155] li = [x for x in li if x!=24] print li #----------------------------------------# Question: With two given lists [1,3,6,78,35,55] and [12,24,35,24,88,120,155], write a program to make a list whose elements are intersection of the above given lists. Hints: Use set() and "&=" to do set intersection operation. Solution: set1=set([1,3,6,78,35,55]) set2=set([12,24,35,24,88,120,155]) set1 &= set2 li=list(set1) print li #----------------------------------------# With a given list [12,24,35,24,88,120,155,88,120,155], write a program to print this list after removing all duplicate values with original order reserved. Hints: Use set() to store a number of values without duplicate. Solution: def removeDuplicate( li ): newli=[] seen = set() for item in li: if item not in seen: seen.add( item ) newli.append(item) return newli li=[12,24,35,24,88,120,155,88,120,155] print removeDuplicate(li) #----------------------------------------# Question: Define a class Person and its two child classes: Male and Female. All classes have a method "getGender" which can print "Male" for Male class and "Female" for Female class. Hints: Use Subclass(Parentclass) to define a child class. Solution: class Person(object): def getGender( self ): return "Unknown" class Male( Person ): def getGender( self ): return "Male" class Female( Person ): def getGender( self ): return "Female" aMale = Male() aFemale= Female() print aMale.getGender() print aFemale.getGender() #----------------------------------------# Question: Please write a program which count and print the numbers of each character in a string input by console. Example: If the following string is given as input to the program: abcdefgabc Then, the output of the program should be: a,2 c,2 b,2 e,1 d,1 g,1 f,1 Hints: Use dict to store key/value pairs. Use dict.get() method to lookup a key with default value. Solution: dic = {} s=raw_input() for s in s: dic[s] = dic.get(s,0)+1 print '\n'.join(['%s,%s' % (k, v) for k, v in dic.items()]) #----------------------------------------# Question: Please write a program which accepts a string from console and print it in reverse order. Example: If the following string is given as input to the program: rise to vote sir Then, the output of the program should be: ris etov ot esir Hints: Use list[::-1] to iterate a list in a reverse order. 
Solution: s=raw_input() s = s[::-1] print s #----------------------------------------# Question: Please write a program which accepts a string from console and print the characters that have even indexes. Example: If the following string is given as input to the program: H1e2l3l4o5w6o7r8l9d Then, the output of the program should be: Helloworld Hints: Use list[::2] to iterate a list by step 2. Solution: s=raw_input() s = s[::2] print s #----------------------------------------# Question: Please write a program which prints all permutations of [1,2,3] Hints: Use itertools.permutations() to get permutations of list. Solution: import itertools print list(itertools.permutations([1,2,3])) #----------------------------------------# Question: Write a program to solve a classic ancient Chinese puzzle: We count 35 heads and 94 legs among the chickens and rabbits in a farm. How many rabbits and how many chickens do we have? Hint: Use for loop to iterate all possible solutions. Solution: def solve(numheads,numlegs): ns='No solutions!' for i in range(numheads+1): j=numheads-i if 2*i+4*j==numlegs: return i,j return ns,ns numheads=35 numlegs=94 solutions=solve(numheads,numlegs) print solutions #----------------------------------------# ```
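All of the solutions above are written for Python 2 (print statements, `raw_input`, implicit integer division). As a quick illustration of how they carry over, here is one of the exercises above — the even/odd check — ported to Python 3; the other solutions can be updated the same way (`print(...)` calls, `input()` instead of `raw_input()`, and `//` where integer division is intended).

```
def check_value(n):
    # Python 3 version of the even/odd exercise above
    if n % 2 == 0:
        print("It is an even number")
    else:
        print("It is an odd number")

check_value(7)
```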
# Phi_K advanced tutorial

This notebook guides you through the more advanced functionality of the phik package. This notebook will not cover all the underlying theory, but will just attempt to give an overview of all the options that are available. For a theoretical description the user is referred to our paper.

The package offers functionality on three related topics:

1. Phik correlation matrix
2. Significance matrix
3. Outlier significance matrix

```
%%capture
# install phik (if not installed yet)
import sys
!"{sys.executable}" -m pip install phik

# import standard packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import itertools

import phik
from phik import resources
from phik.binning import bin_data
from phik.decorators import *
from phik.report import plot_correlation_matrix

%matplotlib inline

# if one changes something in the phik-package one can automatically reload the package or module
%load_ext autoreload
%autoreload 2
```

# Load data

A simulated dataset is part of the phik-package. The dataset concerns car insurance data. Load the dataset here:

```
data = pd.read_csv( resources.fixture('fake_insurance_data.csv.gz') )
data.head()
```

## Specify bin types

The phik-package offers a way to calculate correlations between variables of mixed types. Variable types can be inferred automatically, although we recommend that variable types be specified by the user. Because interval-type variables need to be binned in order to calculate phik and the significance, a list of interval variables is created.

```
data_types = {'severity': 'interval',
              'driver_age': 'interval',
              'satisfaction': 'ordinal',
              'mileage': 'interval',
              'car_size': 'ordinal',
              'car_use': 'ordinal',
              'car_color': 'categorical',
              'area': 'categorical'}

interval_cols = [col for col, v in data_types.items() if v=='interval' and col in data.columns]
interval_cols
# interval_cols is used below
```

# Phik correlation matrix

Now let's start calculating the correlation phik between pairs of variables. Note that the original dataset is used as input; the binning of interval variables is done automatically.

```
phik_overview = data.phik_matrix(interval_cols=interval_cols)
phik_overview
```

### Specify binning per interval variable

Binning can be set per interval variable individually. One can set the number of bins, or specify a list of bin edges. Note that the measured phik correlation is dependent on the chosen binning. The default binning is uniform between the min and max values of the interval variable.

```
bins = {'mileage':5, 'driver_age':[18,25,35,45,55,65,125]}
phik_overview = data.phik_matrix(interval_cols=interval_cols, bins=bins)
phik_overview
```

### Do not apply noise correction

For low-statistics samples, a correlation larger than zero is often measured when no correlation is actually present in the true underlying distribution. This is not only the case for phik, but also for the pearson correlation and Cramer's phi (see figure 4 in <font color='red'> XX </font>). In the phik calculation a noise correction is applied by default, to take into account erroneous correlation values as a result of low statistics. To switch off this noise cancellation (not recommended), do:

```
phik_overview = data.phik_matrix(interval_cols=interval_cols, noise_correction=False)
phik_overview
```

### Using a different expectation histogram

By default phik compares the 2d distribution of two (binned) variables with the distribution that assumes no dependency between them. One can also change the expected distribution though.
Phi_K is calculated in the same way, but using the other expectation distribution.

```
from phik.binning import auto_bin_data
from phik.phik import phik_observed_vs_expected_from_rebinned_df, phik_from_hist2d
from phik.statistics import get_dependent_frequency_estimates

# get observed 2d histogram of two variables
cols = ["mileage", "car_size"]
icols = ["mileage"]
observed = data[cols].hist2d(interval_cols=icols).values

# default phik evaluation from observed distribution
phik_value = phik_from_hist2d(observed)
print (phik_value)

# phik evaluation from an observed and expected distribution
expected = get_dependent_frequency_estimates(observed)
phik_value = phik_from_hist2d(observed=observed, expected=expected)
print (phik_value)

# one can also compare two datasets against each other, and get a full phik matrix that way.
# this needs binned datasets though.
# (the user needs to make sure the binnings of both datasets are identical.)
data_binned, _ = auto_bin_data(data, interval_cols=interval_cols)

# here we are comparing data_binned against itself
phik_matrix = phik_observed_vs_expected_from_rebinned_df(data_binned, data_binned)

# all off-diagonal entries are zero, meaning that all 2d distributions of both datasets are identical.
# (by construction the diagonal is one.)
phik_matrix
```

# Statistical significance of the correlation

When assessing correlations it is good practice to evaluate both the correlation and the significance of the correlation: a large correlation may be statistically insignificant, and vice versa a small correlation may be very significant. For instance, scipy.stats.pearsonr returns both the pearson correlation and the p-value. Similarly, the phik package offers functionality to calculate a significance matrix. Significance is defined as:

$$Z = \Phi^{-1}(1-p)\ ;\quad \Phi(z)=\frac{1}{\sqrt{2\pi}} \int_{-\infty}^{z} e^{-t^{2}/2}\,{\rm d}t $$

Several corrections to the 'standard' p-value calculation are taken into account, making the method more robust for low-statistics and sparse-data cases. The user is referred to our paper for more details. Due to the corrections, the significance calculation can take a few seconds.

```
significance_overview = data.significance_matrix(interval_cols=interval_cols)
significance_overview
```

### Specify binning per interval variable

Binning can be set per interval variable individually. One can set the number of bins, or specify a list of bin edges. Note that the measured phik correlation is dependent on the chosen binning.

```
bins = {'mileage':5, 'driver_age':[18,25,35,45,55,65,125]}
significance_overview = data.significance_matrix(interval_cols=interval_cols, bins=bins)
significance_overview
```

### Specify significance method

The recommended method to calculate the significance of the correlation is a hybrid approach, which uses the G-test statistic. The number of degrees of freedom and an analytical, empirical description of the $\chi^2$ distribution are used, based on Monte Carlo simulations. This method works well for both high- and low-statistics samples.

Other approaches to calculate the significance are implemented:

- asymptotic: fast, but over-estimates the number of degrees of freedom for low statistics samples, leading to erroneous values of the significance
- MC: Many simulated samples are needed to accurately measure significances larger than 3, making this method computationally expensive.
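Whichever method produces the p-value, the last step is always the conversion to a significance $Z$ as defined above. That conversion alone can be sketched with scipy; this is only an illustration of the formula, not the corrected hybrid calculation that phik performs internally.

```
from scipy import stats

# Z = Phi^-1(1 - p): convert a p-value into a one-sided Gaussian significance.
# isf (inverse survival function) is numerically safer than ppf(1 - p) for tiny p-values.
def p_to_z(p_value):
    return stats.norm.isf(p_value)

print(p_to_z(0.05))    # ~1.64
print(p_to_z(0.0013))  # ~3.0
```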
```
significance_overview = data.significance_matrix(interval_cols=interval_cols, significance_method='asymptotic')
significance_overview
```

### Simulation method

The chi2 of a contingency table is measured using a comparison of the expected frequencies with the true frequencies in a contingency table. The expected frequencies can be simulated in a variety of ways. The following methods are implemented:

- multinominal: Only the total number of records is fixed. (default)
- row_product_multinominal: The row totals are fixed in the sampling.
- col_product_multinominal: The column totals are fixed in the sampling.
- hypergeometric: Both the row and column totals are fixed in the sampling. (Note that this type of sampling is only available when row and column totals are integers, which is usually the case.)

```
# --- Warning, can be slow
# turned off here by default for unit testing purposes
#significance_overview = data.significance_matrix(interval_cols=interval_cols, simulation_method='hypergeometric')
#significance_overview
```

### Expected frequencies

```
from phik.simulation import sim_2d_data_patefield, sim_2d_product_multinominal, sim_2d_data

inputdata = data[['driver_age', 'area']].hist2d(interval_cols=['driver_age'])
inputdata
```

#### Multinominal

```
simdata = sim_2d_data(inputdata.values)
print('data total:', inputdata.sum().sum())
print('sim total:', simdata.sum().sum())
print('data row totals:', inputdata.sum(axis=0).values)
print('sim row totals:', simdata.sum(axis=0))
print('data column totals:', inputdata.sum(axis=1).values)
print('sim column totals:', simdata.sum(axis=1))
```

#### Product multinominal

```
simdata = sim_2d_product_multinominal(inputdata.values, axis=0)
print('data total:', inputdata.sum().sum())
print('sim total:', simdata.sum().sum())
print('data row totals:', inputdata.sum(axis=0).astype(int).values)
print('sim row totals:', simdata.sum(axis=0).astype(int))
print('data column totals:', inputdata.sum(axis=1).astype(int).values)
print('sim column totals:', simdata.sum(axis=1).astype(int))
```

#### Hypergeometric ("patefield")

```
# patefield simulation needs compiled c++ code.
# only run this if the python binding to the (compiled) patefield simulation function is found.
try:
    from phik.simcore import _sim_2d_data_patefield
    CPP_SUPPORT = True
except ImportError:
    CPP_SUPPORT = False

if CPP_SUPPORT:
    simdata = sim_2d_data_patefield(inputdata.values)
    print('data total:', inputdata.sum().sum())
    print('sim total:', simdata.sum().sum())
    print('data row totals:', inputdata.sum(axis=0).astype(int).values)
    print('sim row totals:', simdata.sum(axis=0))
    print('data column totals:', inputdata.sum(axis=1).astype(int).values)
    print('sim column totals:', simdata.sum(axis=1))
```

# Outlier significance

The normal pearson correlation between two interval variables is easy to interpret. However, the phik correlation between two variables of mixed type is not always easy to interpret, especially when it concerns categorical variables. Therefore, functionality is provided to detect "outliers": excesses and deficits over the expected frequencies in the contingency table of two variables.

### Example 1: mileage versus car_size

For the categorical variable pair mileage - car_size we measured:

$$\phi_k = 0.77 \, ,\quad\quad \mathrm{significance} = 46.3$$

Let's use the outlier significance functionality to gain a better understanding of this significant correlation between mileage and car size.
```
c0 = 'mileage'
c1 = 'car_size'
tmp_interval_cols = ['mileage']

outlier_signifs, binning_dict = data[[c0,c1]].outlier_significance_matrix(interval_cols=tmp_interval_cols, retbins=True)
outlier_signifs
```

### Specify binning per interval variable

Binning can be set per interval variable individually. One can set the number of bins, or specify a list of bin edges.

Note: in case a bin is created without any records this bin will be automatically dropped in the phik and (outlier) significance calculations. However, in the outlier significance calculation this will currently lead to an error as the number of provided bin edges does not match the number of bins anymore.

```
bins = [0,1E2, 1E3, 1E4, 1E5, 1E6]
outlier_signifs, binning_dict = data[[c0,c1]].outlier_significance_matrix(interval_cols=tmp_interval_cols, bins=bins, retbins=True)
outlier_signifs
```

### Specify binning per interval variable -- dealing with underflow and overflow

When specifying custom bins, a situation can occur where the minimal (maximum) value in the data is smaller (larger) than the minimum (maximum) bin edge. Data points outside the specified range will be collected in the underflow (UF) and overflow (OF) bins. One can choose how to deal with these under/overflow bins, by setting the drop_underflow and drop_overflow variables.

Note that the drop_underflow and drop_overflow options are also available for the calculation of the phik matrix and the significance matrix.

```
bins = [1E2, 1E3, 1E4, 1E5]
outlier_signifs, binning_dict = data[[c0,c1]].outlier_significance_matrix(interval_cols=tmp_interval_cols, bins=bins, retbins=True, drop_underflow=False, drop_overflow=False)
outlier_signifs
```

### Dealing with NaN's in the data

Let's add some missing values to our data:

```
data.loc[np.random.choice(range(len(data)), size=10), 'car_size'] = np.nan
data.loc[np.random.choice(range(len(data)), size=10), 'mileage'] = np.nan
```

Sometimes there can be information in the missing values, in which case you might want to consider the NaN values as a separate category. This can be achieved by setting the dropna argument to False.

```
bins = [1E2, 1E3, 1E4, 1E5]
outlier_signifs, binning_dict = data[[c0,c1]].outlier_significance_matrix(interval_cols=tmp_interval_cols, bins=bins, retbins=True, drop_underflow=False, drop_overflow=False, dropna=False)
outlier_signifs
```

Here OF and UF are the underflow and overflow bin of car_size, respectively. To just ignore records with missing values, set dropna to True (default).

```
bins = [1E2, 1E3, 1E4, 1E5]
outlier_signifs, binning_dict = data[[c0,c1]].outlier_significance_matrix(interval_cols=tmp_interval_cols, bins=bins, retbins=True, drop_underflow=False, drop_overflow=False, dropna=True)
outlier_signifs
```

Note that the dropna option is also available for the calculation of the phik matrix and the significance matrix.
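The import cell at the top of this notebook also pulls in plot_correlation_matrix from phik.report, which is not used above. As a closing illustration, the phik matrix computed earlier could be drawn as a heatmap roughly as follows; the exact keyword arguments may differ between phik versions, so treat this as a sketch rather than the canonical API.

```
from phik.report import plot_correlation_matrix

# visualize the phik correlation matrix computed earlier
plot_correlation_matrix(
    phik_overview.values,
    x_labels=phik_overview.columns,
    y_labels=phik_overview.index,
    vmin=0, vmax=1,
    title=r"correlation $\phi_K$",
    fontsize_factor=1.5,
    figsize=(7, 5.5),
)
plt.tight_layout()
```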
## Support Vector Clustering visualized To get started, please click on the cell with the code below and hit `Shift + Enter` This may take a while. Support Vector Clustering(SVC) is a variation of Support Vector Machine (SVM). SVC is a way of determining a boudary point between different labels. It utilizes a kernel method, helps us to make better decisions on non-linear datasets. In this demo, we will be able to play with 3 parameters, namely `Sample Size`, `C` (Penalty parameter for Cost fucntion), and `gamma` (Kernel coefficent) ``` %matplotlib notebook import matplotlib.pyplot as plt import numpy as np from ipywidgets import * from IPython.display import display from sklearn.svm import SVC plt.style.use('ggplot') def plot_data(data, labels, sep): data_x = data[:, 0] data_y = data[:, 1] sep_x = sep[:, 0] sep_y = sep[:, 1] # plot data fig = plt.figure(figsize=(4, 4)) pos_inds = np.argwhere(labels == 1) pos_inds = [s[0] for s in pos_inds] neg_inds = np.argwhere(labels == -1) neg_inds = [s[0] for s in neg_inds] plt.scatter(data_x[pos_inds], data_y[pos_inds], color='b', linewidth=1, marker='o', edgecolor='k', s=50) plt.scatter(data_x[neg_inds], data_y[neg_inds], color='r', linewidth=1, marker='o', edgecolor='k', s=50) # plot target plt.plot(sep_x, sep_y, '--k', linewidth=3) # clean up plot plt.yticks([], []) plt.xlim([-2.1, 2.1]) plt.ylim([-2.1, 2.1]) plt.axis('off') return plt def update_plot_data(plt, data, labels, sep): plt.cla() plt.clf() data_x = data[:, 0] data_y = data[:, 1] sep_x = sep[:, 0] sep_y = sep[:, 1] # plot data #plt.draw(figsize=(4, 4)) pos_inds = np.argwhere(labels == 1) pos_inds = [s[0] for s in pos_inds] neg_inds = np.argwhere(labels == -1) neg_inds = [s[0] for s in neg_inds] plt.scatter(data_x[pos_inds], data_y[pos_inds], color='b', linewidth=1, marker='o', edgecolor='k', s=50) plt.scatter(data_x[neg_inds], data_y[neg_inds], color='r', linewidth=1, marker='o', edgecolor='k', s=50) # plot target plt.plot(sep_x, sep_y, '--k', linewidth=3) # clean up plot plt.yticks([], []) plt.xlim([-2.1, 2.1]) plt.ylim([-2.1, 2.1]) plt.axis('off') # plot approximation def plot_approx(clf): # plot classification boundary and color regions appropriately r = np.linspace(-2.1, 2.1, 500) s, t = np.meshgrid(r, r) s = np.reshape(s, (np.size(s), 1)) t = np.reshape(t, (np.size(t), 1)) h = np.concatenate((s, t), 1) # use classifier to make predictions z = clf.predict(h) # reshape predictions for plotting s.shape = (np.size(r), np.size(r)) t.shape = (np.size(r), np.size(r)) z.shape = (np.size(r), np.size(r)) # show the filled in predicted-regions of the plane plt.contourf(s, t, z, colors=['r', 'b'], alpha=0.2, levels=range(-1, 2)) # show the classification boundary if it exists if len(np.unique(z)) > 1: plt.contour(s, t, z, colors='k', linewidths=3) def update_plot_approx(plt, clf): # plot classification boundary and color regions appropriately r = np.linspace(-2.1, 2.1, 500) s, t = np.meshgrid(r, r) s = np.reshape(s, (np.size(s), 1)) t = np.reshape(t, (np.size(t), 1)) h = np.concatenate((s, t), 1) # use classifier to make predictions z = clf.predict(h) # reshape predictions for plotting s.shape = (np.size(r), np.size(r)) t.shape = (np.size(r), np.size(r)) z.shape = (np.size(r), np.size(r)) plt.cla() plt.clf() # show the filled in predicted-regions of the plane plt.contourf(s, t, z, colors=['r', 'b'], alpha=0.2, levels=range(-1, 2)) # show the classification boundary if it exists if len(np.unique(z)) > 1: plt.contour(s, t, z, colors='k', linewidths=3) def 
make_circle_classification_dataset(num_pts): ''' This function generates a random circle dataset with two classes. You can run this a couple times to get a distribution you like visually. You can also adjust the num_pts parameter to change the total number of points in the dataset. ''' # generate points num_misclass = 5 # total number of misclassified points s = np.random.rand(num_pts) data_x = np.cos(2 * np.pi * s) data_y = np.sin(2 * np.pi * s) radi = 2 * np.random.rand(num_pts) data_x = data_x * radi data_y = data_y * radi data_x.shape = (len(data_x), 1) data_y.shape = (len(data_y), 1) data = np.concatenate((data_x, data_y), axis=1) # make separator s = np.linspace(0, 1, 100) x_f = np.cos(2 * np.pi * s) y_f = np.sin(2 * np.pi * s) x_f.shape = (len(x_f), 1) y_f.shape = (len(y_f), 1) sep = np.concatenate((x_f, y_f), axis=1) # make labels and flip a few to show some misclassifications labels = radi.copy() ind1 = np.argwhere(labels > 1) ind1 = [v[0] for v in ind1] ind2 = np.argwhere(labels <= 1) ind2 = [v[0] for v in ind2] labels[ind1] = -1 labels[ind2] = +1 flip = np.random.permutation(num_pts) flip = flip[:num_misclass] for i in flip: labels[i] = (-1) * labels[i] # return datapoints and labels for study return data, labels, sep sample_size = widgets.IntSlider( value=50, min=50, max=1000, step=1, description='Sample size: ', disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='.1f', slider_color='white' ) split_ratio = widgets.FloatSlider( value=0.2, min=0, max=1.0, step=0.1, description='Train/Test Split Ratio (0-1): ', disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='.1f', slider_color='white' ) c = widgets.FloatSlider( value=0.1, min=0.1, max=10.0, step=0.1, description='C: ', disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='.1f', slider_color='white' ) gamma = widgets.FloatSlider( value=0.1, min=0.1, max=10.0, step=0.1, description='Gamma: ', disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='.1f', slider_color='white' ) display(sample_size) #init plot data, labels, true_sep = make_circle_classification_dataset(num_pts=sample_size.value) # preparing the plot clf = SVC(C=c.value, kernel='rbf', gamma=gamma.value) # fit classifier clf.fit(data, labels) # plot results fit_plot = plot_data(data, labels, true_sep) plot_approx(clf) def on_train_info_change(change): clf = SVC(C=c.value, kernel='rbf', gamma=gamma.value) # fit classifier clf.fit(data, labels) # plot results update_plot_data(fit_plot, data, labels, true_sep) plot_approx(clf) def on_value_change_sample(change): global data global labels global true_sep data, labels, true_sep = make_circle_classification_dataset(num_pts=sample_size.value) update_plot_data(fit_plot, data, labels, true_sep) clf = SVC(C=c.value,kernel='rbf',gamma=gamma.value) # fit classifier clf.fit(data, labels) # plot results update_plot_data(fit_plot, data, labels, true_sep) plot_approx(clf) sample_size.observe(on_value_change_sample, names='value') display(c) display(gamma) c.observe(on_train_info_change, names='value') gamma.observe(on_train_info_change, names='value') ```
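The interactive widgets above only work inside a live notebook session. As a minimal non-interactive sketch of the same idea, one can fit SVC on a circular two-class dataset for a few values of `C` and `gamma` and compare the training accuracy; `make_circles` from sklearn is used here as a stand-in for the custom `make_circle_classification_dataset` above.

```
from sklearn.datasets import make_circles
from sklearn.svm import SVC

# noisy two-class "circle" data, similar in spirit to the widget demo above
X, y = make_circles(n_samples=200, noise=0.1, factor=0.5, random_state=0)

for C in (0.1, 1.0, 10.0):
    for gamma in (0.1, 1.0, 10.0):
        clf = SVC(C=C, kernel="rbf", gamma=gamma).fit(X, y)
        print(f"C={C:<5} gamma={gamma:<5} train accuracy={clf.score(X, y):.2f}")
```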
``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import cartopy.crs as ccrs migration_patterns = pd.read_csv("./arctic_tern_migration.csv") #Data file, needs to be in working directory #Adds "Month" column to pd dataframe - "Date" is a string: "DD/MM/YYYY", takes characters 3:5=3,4 -> MM and converts it to an integer, then added to dataframe migration_patterns_months = [int(migration_patterns['Date'][i][3:5]) for i in range(len(migration_patterns["Date"]))] migration_patterns["Month"] = migration_patterns_months #Southbound - mostly between Aug/Oct but takes times from Jul/Nov, and latitudes between +/- 40 migration_patterns_sbound = migration_patterns.where(migration_patterns["Month"] > 6).where(migration_patterns["Month"] < 12)\ .where(np.abs(migration_patterns["Lat"]) <= 40).dropna(how="all") #Rounds latitude for grouping migration_patterns_sbound["Lat"] = np.round(migration_patterns_sbound["Lat"]) #Latitudes grouped and mean longitude computed. rolling used to smooth data - roll_mean_over can be changed, roll_mean_over = 1 <=> no rolling mean. roll_mean_over = 3 #migration_patterns_sbound.groupby("Lat").mean("Long")["Long"].rolling(roll_mean_over,center=True).mean() fig = plt.figure(figsize=[15,8]) ax = fig.add_subplot(111, projection=ccrs.PlateCarree(0)) ax.coastlines() ax.gridlines() ax.plot(migration_patterns_sbound.groupby("Lat").mean("Long")["Long"].rolling(roll_mean_over,center=True).mean()[::3], np.arange(-40,40)[::3]) fig.savefig("./meanpath.png") #To plot: fig = plt.figure(figsize=[15,8]) ax = fig.add_subplot(111, projection=ccrs.PlateCarree(0)) ax.coastlines() ax.gridlines() SA_keys = [] AF_keys = [] IO_keys = [] for key, grp in migration_patterns_sbound.groupby(['Bird ID']): if grp["Long"].mean() < -20: if grp["Bird ID"].all() != "ARTE_390": #removes anomalous bird ax = grp.plot(ax=ax, kind='line', x='Long', y='Lat', color="r", linewidth=1, alpha=0.5) SA_keys.append(key) if np.abs(grp["Long"]).mean() <= 20: ax = grp.plot(ax=ax, kind='line', x='Long', y='Lat', color="b", linewidth=1, alpha=0.5) AF_keys.append(key) if grp["Long"].mean() > 20: ax = grp.plot(ax=ax, kind='line', x='Long', y='Lat', color="g", linewidth=1, alpha=0.5) IO_keys.append(key) migration_patterns_sbound_SA = migration_patterns_sbound migration_patterns_sbound_AF = migration_patterns_sbound migration_patterns_sbound_IO = migration_patterns_sbound for key in SA_keys: migration_patterns_sbound_IO = migration_patterns_sbound_IO.where(migration_patterns_sbound_IO["Bird ID"] != key) migration_patterns_sbound_AF = migration_patterns_sbound_AF.where(migration_patterns_sbound_AF["Bird ID"] != key) for key in AF_keys: migration_patterns_sbound_SA = migration_patterns_sbound_SA.where(migration_patterns_sbound_SA["Bird ID"] != key) migration_patterns_sbound_IO = migration_patterns_sbound_IO.where(migration_patterns_sbound_IO["Bird ID"] != key) for key in IO_keys: migration_patterns_sbound_SA = migration_patterns_sbound_SA.where(migration_patterns_sbound_SA["Bird ID"] != key) migration_patterns_sbound_AF = migration_patterns_sbound_AF.where(migration_patterns_sbound_AF["Bird ID"] != key) migration_patterns_sbound_SA = migration_patterns_sbound_SA.where(migration_patterns_sbound_SA["Bird ID"] != "ARTE_390") migration_patterns_sbound_AF = migration_patterns_sbound_AF.where(migration_patterns_sbound_AF["Bird ID"] != "ARTE_390") migration_patterns_sbound_IO = migration_patterns_sbound_IO.where(migration_patterns_sbound_IO["Bird ID"] != "ARTE_390") 
#ax.plot(migration_patterns_sbound.groupby("Lat").mean("Long")["Long"].rolling(9,center=True).mean(), np.arange(-40,40), color="k", linewidth=3) ax.plot(migration_patterns_sbound_SA.groupby("Lat").mean("Long")["Long"].rolling(roll_mean_over,center=True).mean()[::-roll_mean_over], sorted(migration_patterns_sbound_SA["Lat"].unique()[1:])[::-roll_mean_over], color="r", linewidth=4) ax.plot(migration_patterns_sbound_IO.groupby("Lat").mean("Long")["Long"].rolling(roll_mean_over,center=True).mean()[::-roll_mean_over], sorted(migration_patterns_sbound_IO["Lat"].unique()[1:])[::-roll_mean_over], color="g", linewidth=4) ax.plot(migration_patterns_sbound_AF.groupby("Lat").mean("Long")["Long"].rolling(roll_mean_over,center=True).mean()[::-roll_mean_over], np.unique(sorted(migration_patterns_sbound_AF["Lat"].unique()[1:]))[::-roll_mean_over], color="b", linewidth=4) legend = ax.legend() legend.remove() ax.set_xlim(-90,90) ax.set_ylim(-90,90) fig.savefig("./figures/meanpath_grouped.png", dpi=150, bbox_inches="tight") np.save("./migration_southbound_SA_long.npy", migration_patterns_sbound_SA.groupby("Lat").mean("Long")["Long"].rolling(roll_mean_over, center=True).mean().values[::-roll_mean_over]) np.save("./migration_southbound_IO_long.npy", migration_patterns_sbound_IO.groupby("Lat").mean("Long")["Long"].rolling(roll_mean_over,center=True).mean().values[::-roll_mean_over]) np.save("./migration_southbound_AF_long.npy", migration_patterns_sbound_AF.groupby("Lat").mean("Long")["Long"].rolling(roll_mean_over,center=True).mean().values[::-roll_mean_over]) np.save("./migration_southbound_SA_lat.npy", sorted(migration_patterns_sbound_SA["Lat"].unique()[1:])[::-roll_mean_over]) np.save("./migration_southbound_IO_lat.npy", sorted(migration_patterns_sbound_IO["Lat"].unique()[1:])[::-roll_mean_over]) np.save("./migration_southbound_AF_lat.npy", np.unique(sorted(migration_patterns_sbound_AF["Lat"].unique()[1:]))[::-roll_mean_over]) #Note: a rolling mean is taken over n=roll_mean_over values, and every nth value is taken, as a simple low pass filter. ```
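The closing comment notes that taking a rolling mean over n = roll_mean_over values and then keeping every nth value acts as a simple low-pass filter. A small standalone sketch of that idea on a synthetic noisy series (the numbers below are made up purely for illustration):

```
import numpy as np
import pandas as pd

n = 3  # corresponds to roll_mean_over above
lat = np.arange(-40, 40)
noisy_long = np.sin(lat / 10) * 30 + np.random.normal(0, 5, size=lat.size)

# rolling mean smooths out the noise, then every nth value is kept (decimation)
smoothed = pd.Series(noisy_long, index=lat).rolling(n, center=True).mean()
decimated = smoothed[::n]
print(decimated.head())
```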
``` import tabula import numpy as np import pandas as pd import os from pathlib import Path import PyPDF2 import re import requests import json import time # filenames = [ # os.path.expanduser('/home/parth/Documents/USICT/it_res.pdf'), # os.path.expanduser('/home/parth/Documents/USICT/cse_res.pdf'), # os.path.expanduser('/home/parth/Documents/USICT/ece_res.pdf')] # filenames = [ # os.path.expanduser('~/Documents/USICT/ipu_results/cse_even_sems.pdf'), # os.path.expanduser('~/Documents/USICT/ipu_results/ece_even_sems.pdf') # ] # filenames = [ # os.path.expanduser('~/Documents/USICT/ipu_results/it_even_sems.pdf') # ] filenames = [ os.path.expanduser('/home/parth/Documents/USICT/it_res.pdf'), os.path.expanduser('/home/parth/Documents/USICT/cse_res.pdf'), os.path.expanduser('/home/parth/Documents/USICT/ece_res.pdf'), os.path.expanduser('~/Documents/USICT/ipu_results/cse_even_sems.pdf'), os.path.expanduser('~/Documents/USICT/ipu_results/ece_even_sems.pdf'), os.path.expanduser('~/Documents/USICT/ipu_results/it_even_sems.pdf') ] scheme_reg = re.compile(r'scheme\s+of\s+examinations',re.IGNORECASE) institution_reg = re.compile(r'institution\s*:\s*([\w\n(,)& ]+)\nS\.No',re.IGNORECASE) sem_reg = re.compile(r'se\s?m[.//\w\n]+:\s+([\w\n]+)',re.IGNORECASE) programme_reg = re.compile(r'programme\s+name:\s+([\w(,)& \n]+)SchemeID',re.IGNORECASE) branch_reg = re.compile(r'[\w &]+\(([\w ]+)\)') def get_info(text) : college = institution_reg.search(text)[1].replace('\n','').strip().title() semester = int(sem_reg.search(text)[1].replace('\n','').strip()) course = programme_reg.search(text)[1].replace('\n','').strip().title() branch = branch_reg.search(course)[1].strip().title() course = course[0:course.find('(')].strip() info = { 'college' : college, 'semester' : semester, 'course' : course, 'branch' : branch, } return info SITE = "https://api-rhapsody.herokuapp.com/academia" # SITE = "http://localhost:3000/academia" #Add college data ={ 'college' : { 'college' : "University School Of Information, Communication & Technology (Formerly Usit)" } } r = requests.post(SITE+"/college",json=data) print(r,r.content) def already_exists(info) : r = requests.get(SITE+"/semester",params=info) content = json.loads(r.content) # print(r.status_code,r.content) return r.status_code == 200 and content != {} def getSubjects(df) : subjects = [] for index,row in df.iterrows() : subject = {} subject['subject'] = row['Subject'].strip().title() subject['subjectCode'] = row['Code'] subject['credits'] = row['Credit'] subjects.append(subject) return subjects for filename in filenames : pdf = PyPDF2.PdfFileReader(filename) print(filename,pdf.getNumPages()) for i in range(0,pdf.getNumPages()) : text = pdf.getPage(i).extractText() if scheme_reg.search(text) : info = get_info(text) df = tabula.read_pdf(filename,pages=i+1) subjects = getSubjects(df[0]) if already_exists(info) : print("information already exists") continue info['semester'] = {'semester' : info['semester'], 'subjects' : subjects} r = requests.post(SITE+"/semester",json=info) print(r,r.content) # time.sleep(2) # print(info) from IPython.display import display ```
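The regular expressions above are easiest to sanity-check against a small synthetic snippet before running them over every PDF page. The sample text below is invented for illustration only; it simply mimics the field layout that `get_info` expects.

```
# hypothetical page text, shaped like the PDF result headers parsed above
sample_text = (
    "Institution: University School of Information, Communication & Technology\n"
    "S.No ...\n"
    "Sem./Annual: 3\n"
    "Programme Name: Bachelor of Technology (Information Technology)SchemeID"
)
print(get_info(sample_text))
```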
```
import requests
from random import randint
from time import sleep
from bs4 import BeautifulSoup
import pandas as pd

# Now we have a summary above the function
def get_url_micro_onde_tunisianet():
    url_micro_onde_details = []
    urls = [
        "https://www.tunisianet.com.tn/564-four-electrique-tunisie-micro-onde"
    ]
    for page in range(2,5):
        url = f"https://www.tunisianet.com.tn/564-four-electrique-tunisie-micro-onde?page={page}"
        response = requests.get(url)
        page_contents = response.text
        if response.status_code != 200:
            raise Exception('Failed to load page {}'.format(url))
        doc = BeautifulSoup(page_contents, "html.parser")
        for item in doc.find_all("a", {'class': "thumbnail product-thumbnail first-img"}):
            url_micro_onde_details.append(item['href'])
    for page in urls:
        url = page
        response = requests.get(url)
        page_contents = response.text
        if response.status_code != 200:
            raise Exception('Failed to load page {}'.format(url))
        doc = BeautifulSoup(page_contents, "html.parser")
        for item in doc.find_all("a", {'class': "thumbnail product-thumbnail first-img"}):
            url_micro_onde_details.append(item['href'])
    return url_micro_onde_details

url_micro_onde = get_url_micro_onde_tunisianet()
len(url_micro_onde)

def get_micro_onde(items_url):
    images_micro_ondes = []
    # download the page
    response = requests.get(items_url)
    # check that the response succeeded
    if response.status_code != 200:
        raise Exception('Failed to load page {}'.format(items_url))
    # parse the response with BeautifulSoup
    doc = BeautifulSoup(response.text, 'html.parser')
    for i, img in enumerate(doc.find_all('a', {'class': 'thumb-container'})):
        if i >= 1 and len(doc.find_all('a', {'class': 'thumb-container'})) > 1:
            images_micro_ondes.append(img['data-image'])
    return images_micro_ondes

images_micro_ondes = []
for url in url_micro_onde:
    for image in get_micro_onde(url):
        images_micro_ondes.append(image)
len(images_micro_ondes)

import random
import urllib.request
import os

def download_micro_ondes(urls, doc):
    os.makedirs(os.path.join('images', doc))
    for i, url in enumerate(urls):
        try:
            fullname = "images/" + doc + "/" + str((i+1)) + ".jpg"
            urllib.request.urlretrieve(url, fullname)
        except:
            pass

download_micro_ondes(images_micro_ondes, 'micro_onde')
```
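`randint` and `sleep` are imported at the top of this notebook but never used. A common reason to have them is to pause between requests so the target site is not hammered; a sketch of how the image-collection loop above could be throttled (the 1–3 second range is an arbitrary choice):

```
from random import randint
from time import sleep

images_micro_ondes = []
for url in url_micro_onde:
    for image in get_micro_onde(url):
        images_micro_ondes.append(image)
    # wait a random 1-3 seconds between product pages to stay polite
    sleep(randint(1, 3))
```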
# Description This notebook runs some pre-analyses using DBSCAN to explore the best set of parameters (`min_samples` and `eps`) to cluster `pca` data version. # Environment variables ``` from IPython.display import display import conf N_JOBS = conf.GENERAL["N_JOBS"] display(N_JOBS) %env MKL_NUM_THREADS=$N_JOBS %env OPEN_BLAS_NUM_THREADS=$N_JOBS %env NUMEXPR_NUM_THREADS=$N_JOBS %env OMP_NUM_THREADS=$N_JOBS ``` # Modules loading ``` %load_ext autoreload %autoreload 2 from pathlib import Path import numpy as np import pandas as pd from sklearn.neighbors import NearestNeighbors from sklearn.metrics import pairwise_distances from sklearn.cluster import DBSCAN from sklearn.metrics import ( silhouette_score, calinski_harabasz_score, davies_bouldin_score, ) import matplotlib.pyplot as plt import seaborn as sns from utils import generate_result_set_name from clustering.ensembles.utils import generate_ensemble ``` # Global settings ``` np.random.seed(0) CLUSTERING_ATTRIBUTES_TO_SAVE = ["n_clusters"] ``` # Data version: pca ``` INPUT_SUBSET = "pca" INPUT_STEM = "z_score_std-projection-smultixcan-efo_partial-mashr-zscores" DR_OPTIONS = { "n_components": 50, "svd_solver": "full", "random_state": 0, } input_filepath = Path( conf.RESULTS["DATA_TRANSFORMATIONS_DIR"], INPUT_SUBSET, generate_result_set_name( DR_OPTIONS, prefix=f"{INPUT_SUBSET}-{INPUT_STEM}-", suffix=".pkl" ), ).resolve() display(input_filepath) assert input_filepath.exists(), "Input file does not exist" input_filepath_stem = input_filepath.stem display(input_filepath_stem) data = pd.read_pickle(input_filepath) data.shape data.head() ``` ## Tests different k values (k-NN) ``` # `k_values` is the full range of k for kNN, whereas `k_values_to_explore` is a # subset that will be explored in this notebook. 
If the analysis works, then # `k_values` and `eps_range_per_k` below are copied to the notebook that will # produce the final DBSCAN runs (`../002_[...]-dbscan-....ipynb`) k_values = np.arange(2, 125 + 1, 1) k_values_to_explore = (2, 5, 10, 15, 20, 30, 40, 50, 75, 100, 125) results = {} for k in k_values_to_explore: nbrs = NearestNeighbors(n_neighbors=k, n_jobs=N_JOBS).fit(data) distances, indices = nbrs.kneighbors(data) results[k] = (distances, indices) eps_range_per_k = { k: (10, 20) if k < 5 else (11, 25) if k < 10 else (12, 30) if k < 15 else (13, 35) if k < 20 else (14, 40) for k in k_values } eps_range_per_k_to_explore = {k: eps_range_per_k[k] for k in k_values_to_explore} for k, (distances, indices) in results.items(): d = distances[:, 1:].mean(axis=1) d = np.sort(d) fig, ax = plt.subplots() plt.plot(d) r = eps_range_per_k_to_explore[k] plt.hlines(r[0], 0, data.shape[0], color="red") plt.hlines(r[1], 0, data.shape[0], color="red") plt.xlim((3000, data.shape[0])) plt.title(f"k={k}") display(fig) plt.close(fig) ``` # Extended test ## Generate clusterers ``` CLUSTERING_OPTIONS = {} # K_RANGE is the min_samples parameter in DBSCAN (sklearn) CLUSTERING_OPTIONS["K_RANGE"] = k_values_to_explore CLUSTERING_OPTIONS["EPS_RANGE_PER_K"] = eps_range_per_k_to_explore CLUSTERING_OPTIONS["EPS_STEP"] = 33 CLUSTERING_OPTIONS["METRIC"] = "euclidean" display(CLUSTERING_OPTIONS) CLUSTERERS = {} idx = 0 for k in CLUSTERING_OPTIONS["K_RANGE"]: eps_range = CLUSTERING_OPTIONS["EPS_RANGE_PER_K"][k] eps_values = np.linspace(eps_range[0], eps_range[1], CLUSTERING_OPTIONS["EPS_STEP"]) for eps in eps_values: clus = DBSCAN(min_samples=k, eps=eps, metric="precomputed", n_jobs=N_JOBS) method_name = type(clus).__name__ CLUSTERERS[f"{method_name} #{idx}"] = clus idx = idx + 1 display(len(CLUSTERERS)) _iter = iter(CLUSTERERS.items()) display(next(_iter)) display(next(_iter)) clustering_method_name = method_name display(clustering_method_name) ``` ## Generate ensemble ``` data_dist = pairwise_distances(data, metric=CLUSTERING_OPTIONS["METRIC"]) data_dist.shape pd.Series(data_dist.flatten()).describe().apply(str) ensemble = generate_ensemble( data_dist, CLUSTERERS, attributes=CLUSTERING_ATTRIBUTES_TO_SAVE, ) ensemble.shape ensemble.head() _tmp = ensemble["n_clusters"].value_counts() display(_tmp) assert _tmp.index[0] == 3 assert _tmp.loc[3] == 22 ensemble_stats = ensemble["n_clusters"].describe() display(ensemble_stats) # number of noisy points _tmp = ensemble.copy() _tmp = _tmp.assign(n_noisy=ensemble["partition"].apply(lambda x: np.isnan(x).sum())) _tmp_stats = _tmp["n_noisy"].describe() display(_tmp_stats) assert _tmp_stats["min"] > 5 assert _tmp_stats["max"] < 600 assert 90 < _tmp_stats["mean"] < 95 ``` ## Testing ``` assert ensemble_stats["min"] > 1 assert not ensemble["n_clusters"].isna().any() # all partitions have the right size assert np.all( [part["partition"].shape[0] == data.shape[0] for idx, part in ensemble.iterrows()] ) ``` ## Add clustering quality measures ``` def _remove_nans(data, part): not_nan_idx = ~np.isnan(part) return data.iloc[not_nan_idx], part[not_nan_idx] def _apply_func(func, data, part): no_nan_data, no_nan_part = _remove_nans(data, part) return func(no_nan_data, no_nan_part) ensemble = ensemble.assign( si_score=ensemble["partition"].apply( lambda x: _apply_func(silhouette_score, data, x) ), ch_score=ensemble["partition"].apply( lambda x: _apply_func(calinski_harabasz_score, data, x) ), db_score=ensemble["partition"].apply( lambda x: _apply_func(davies_bouldin_score, data, x) ), ) 
ensemble.shape

ensemble.head()
```

# Cluster quality

```
with pd.option_context("display.max_rows", None, "display.max_columns", None):
    _df = ensemble.groupby(["n_clusters"]).mean()
    display(_df)

with sns.plotting_context("talk", font_scale=0.75), sns.axes_style(
    "whitegrid", {"grid.linestyle": "--"}
):
    fig = plt.figure(figsize=(14, 6))
    ax = sns.pointplot(data=ensemble, x="n_clusters", y="si_score")
    ax.set_ylabel("Silhouette index\n(higher is better)")
    ax.set_xlabel("Number of clusters ($k$)")
    ax.set_xticklabels(ax.get_xticklabels(), rotation=45)
    plt.grid(True)
    plt.tight_layout()

with sns.plotting_context("talk", font_scale=0.75), sns.axes_style(
    "whitegrid", {"grid.linestyle": "--"}
):
    fig = plt.figure(figsize=(14, 6))
    ax = sns.pointplot(data=ensemble, x="n_clusters", y="ch_score")
    ax.set_ylabel("Calinski-Harabasz index\n(higher is better)")
    ax.set_xlabel("Number of clusters ($k$)")
    ax.set_xticklabels(ax.get_xticklabels(), rotation=45)
    plt.grid(True)
    plt.tight_layout()

with sns.plotting_context("talk", font_scale=0.75), sns.axes_style(
    "whitegrid", {"grid.linestyle": "--"}
):
    fig = plt.figure(figsize=(14, 6))
    ax = sns.pointplot(data=ensemble, x="n_clusters", y="db_score")
    ax.set_ylabel("Davies-Bouldin index\n(lower is better)")
    ax.set_xlabel("Number of clusters ($k$)")
    ax.set_xticklabels(ax.get_xticklabels(), rotation=45)
    plt.grid(True)
    plt.tight_layout()
```

# Conclusions

The values explored above for `k_values` and `eps_range_per_k` are the ones that will be used for the final DBSCAN runs on this data version.
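For reference, the sketch below shows how a single DBSCAN run with one of the explored parameter combinations could be reproduced on the precomputed distance matrix from above; the specific `k` and `eps` values here are placeholders, not the final selection.

```
# hypothetical single run with one explored (min_samples, eps) combination
k, eps = 20, 14.0
clus = DBSCAN(min_samples=k, eps=eps, metric="precomputed", n_jobs=N_JOBS)
labels = clus.fit_predict(data_dist)
display(pd.Series(labels).value_counts())  # label -1 marks noisy points
```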
# Multi Investment Optimization

In the following, we show how PyPSA can deal with multi-investment optimization, also known as multi-horizon optimization.

Here, the total set of snapshots is divided into investment periods. For the model, this translates into multi-indexed snapshots with the first level being the investment period and the second level the corresponding time steps. In each investment period, new assets may be added to the system; on the other hand, assets may only operate as long as allowed by their lifetime.

In contrast to the ordinary optimisation, the following concepts have to be taken into account.

1. `investment_periods` - `pypsa.Network` attribute. This is the set of periods which specify when new assets may be built. In the current implementation, these have to be the same as the first-level values in the `snapshots` attribute.

2. `investment_period_weightings` - `pypsa.Network` attribute. These specify the weighting of each period in the objective function.

3. `build_year` - general component attribute. A single asset may only be built when the build year is less than or equal to the current investment period. For example, assets with a build year `2029` are considered in the investment period `2030`, but not in the period `2025`.

4. `lifetime` - general component attribute. An asset is only considered in an investment period if it is still present at the beginning of that period. For example, an asset with build year `2029` and lifetime `30` is considered in the investment period `2055` but not in the period `2060`.

In the following, we set up a three-node network with generators, lines and storage units and run an optimisation covering the time span from 2020 to 2050, where each decade is one investment period.

```
import pypsa
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
```

We set up the network with investment periods and snapshots.

```
n = pypsa.Network()
years = [2020, 2030, 2040, 2050]
freq = "24"

snapshots = pd.DatetimeIndex([])
for year in years:
    period = pd.date_range(
        start="{}-01-01 00:00".format(year),
        freq="{}H".format(freq),
        periods=8760 / float(freq),
    )
    snapshots = snapshots.append(period)

# convert to multiindex and assign to network
n.snapshots = pd.MultiIndex.from_arrays([snapshots.year, snapshots])
n.investment_periods = years

n.snapshot_weightings

n.investment_periods
```

Set the years and objective weighting per investment period. For the objective weighting, we consider a discount rate defined by

$$ D(t) = \dfrac{1}{(1+r)^t} $$

where $r$ is the discount rate. For each period, we sum up the discount rates of the corresponding years, which gives us the effective objective weighting.
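For instance, with $r = 0.01$ and a 10-year period starting at $T = 0$, the effective weighting is $\sum_{t=0}^{9} 1/(1+r)^t \approx 9.57$, slightly below the undiscounted value of 10 because later years count for less.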
``` n.investment_period_weightings["years"] = list(np.diff(years)) + [10] r = 0.01 T = 0 for period, nyears in n.investment_period_weightings.years.items(): discounts = [(1 / (1 + r) ** t) for t in range(T, T + nyears)] n.investment_period_weightings.at[period, "objective"] = sum(discounts) T += nyears n.investment_period_weightings ``` Add the components ``` for i in range(3): n.add("Bus", "bus {}".format(i)) # add three lines in a ring n.add( "Line", "line 0->1", bus0="bus 0", bus1="bus 1", ) n.add( "Line", "line 1->2", bus0="bus 1", bus1="bus 2", capital_cost=10, build_year=2030, ) n.add( "Line", "line 2->0", bus0="bus 2", bus1="bus 0", ) n.lines["x"] = 0.0001 n.lines["s_nom_extendable"] = True n.lines # add some generators p_nom_max = pd.Series( (np.random.uniform() for sn in range(len(n.snapshots))), index=n.snapshots, name="generator ext 2020", ) # renewable (can operate 2020, 2030) n.add( "Generator", "generator ext 0 2020", bus="bus 0", p_nom=50, build_year=2020, lifetime=20, marginal_cost=2, capital_cost=1, p_max_pu=p_nom_max, carrier="solar", p_nom_extendable=True, ) # can operate 2040, 2050 n.add( "Generator", "generator ext 0 2040", bus="bus 0", p_nom=50, build_year=2040, lifetime=11, marginal_cost=25, capital_cost=10, carrier="OCGT", p_nom_extendable=True, ) # can operate in 2040 n.add( "Generator", "generator fix 1 2040", bus="bus 1", p_nom=50, build_year=2040, lifetime=10, carrier="CCGT", marginal_cost=20, capital_cost=1, ) n.generators n.add( "StorageUnit", "storageunit non-cyclic 2030", bus="bus 2", p_nom=0, capital_cost=2, build_year=2030, lifetime=21, cyclic_state_of_charge=False, p_nom_extendable=False, ) n.add( "StorageUnit", "storageunit periodic 2020", bus="bus 2", p_nom=0, capital_cost=1, build_year=2020, lifetime=21, cyclic_state_of_charge=True, cyclic_state_of_charge_per_period=True, p_nom_extendable=True, ) n.storage_units ``` Add the load ``` load_var = pd.Series( 100 * np.random.rand(len(n.snapshots)), index=n.snapshots, name="load" ) n.add("Load", "load 2", bus="bus 2", p_set=load_var) load_fix = pd.Series(75, index=n.snapshots, name="load") n.add("Load", "load 1", bus="bus 1", p_set=load_fix) ``` Run the optimization ``` n.loads_t.p_set n.lopf(pyomo=False, multi_investment_periods=True) c = "Generator" df = pd.concat( { period: n.get_active_assets(c, period) * n.df(c).p_nom_opt for period in n.investment_periods }, axis=1, ) df.T.plot.bar( stacked=True, edgecolor="white", width=1, ylabel="Capacity", xlabel="Investment Period", rot=0, figsize=(10, 5), ) plt.tight_layout() df = n.generators_t.p.sum(level=0).T df.T.plot.bar( stacked=True, edgecolor="white", width=1, ylabel="Generation", xlabel="Investment Period", rot=0, figsize=(10, 5), ) ```
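As a quick sanity check of the `build_year`/`lifetime` rules described at the beginning of this example, the small sketch below reproduces the activity window of an asset by hand. This is a simplified illustration of the rule, not PyPSA's internal implementation; `n.get_active_assets` (used above) is the authoritative way to query this.

```
def active_periods(build_year, lifetime, periods):
    # an asset is active in a period if it is already built and
    # still within its lifetime at the beginning of that period
    return [p for p in periods if build_year <= p < build_year + lifetime]

print(active_periods(2029, 30, [2020, 2030, 2040, 2050]))  # [2030, 2040, 2050]
```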
## Rhetorical relations classification used in tree building: ESIM Prepare data and model-related scripts. Evaluate models. Make and evaluate ansembles for ESIM and BiMPM model / ESIM and feature-based model. Output: - ``models/relation_predictor_esim/*`` ``` %load_ext autoreload %autoreload 2 import os import glob import pandas as pd import numpy as np import pickle from utils.file_reading import read_edus, read_gold, read_negative, read_annotation ``` ### Make a directory ``` MODEL_PATH = 'models/label_predictor_esim' ! mkdir $MODEL_PATH TRAIN_FILE_PATH = os.path.join(MODEL_PATH, 'nlabel_cf_train.tsv') DEV_FILE_PATH = os.path.join(MODEL_PATH, 'nlabel_cf_dev.tsv') TEST_FILE_PATH = os.path.join(MODEL_PATH, 'nlabel_cf_test.tsv') ``` ### Prepare train/test sets ``` IN_PATH = 'data_labeling' train_samples = pd.read_pickle(os.path.join(IN_PATH, 'train_samples.pkl')) dev_samples = pd.read_pickle(os.path.join(IN_PATH, 'dev_samples.pkl')) test_samples = pd.read_pickle(os.path.join(IN_PATH, 'test_samples.pkl')) counts = train_samples['relation'].value_counts(normalize=False).values NUMBER_CLASSES = len(counts) print("number of classes:", NUMBER_CLASSES) print("class weights:") np.round(counts.min() / counts, decimals=6) counts = train_samples['relation'].value_counts() counts import razdel def tokenize(text): result = ' '.join([tok.text for tok in razdel.tokenize(text)]) return result train_samples['snippet_x'] = train_samples.snippet_x.map(tokenize) train_samples['snippet_y'] = train_samples.snippet_y.map(tokenize) dev_samples['snippet_x'] = dev_samples.snippet_x.map(tokenize) dev_samples['snippet_y'] = dev_samples.snippet_y.map(tokenize) test_samples['snippet_x'] = test_samples.snippet_x.map(tokenize) test_samples['snippet_y'] = test_samples.snippet_y.map(tokenize) train_samples = train_samples.reset_index() train_samples[['relation', 'snippet_x', 'snippet_y', 'index']].to_csv(TRAIN_FILE_PATH, sep='\t', header=False, index=False) dev_samples = dev_samples.reset_index() dev_samples[['relation', 'snippet_x', 'snippet_y', 'index']].to_csv(DEV_FILE_PATH, sep='\t', header=False, index=False) test_samples = test_samples.reset_index() test_samples[['relation', 'snippet_x', 'snippet_y', 'index']].to_csv(TEST_FILE_PATH, sep='\t', header=False, index=False) ``` ### Modify model (Add F1, concatenated encoding) ``` %%writefile models/bimpm_custom_package/model/esim.py from typing import Dict, List, Any, Optional import numpy import torch from allennlp.common.checks import check_dimensions_match from allennlp.data import TextFieldTensors, Vocabulary from allennlp.models.model import Model from allennlp.modules import FeedForward, InputVariationalDropout from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention from allennlp.modules import Seq2SeqEncoder, TextFieldEmbedder from allennlp.nn import InitializerApplicator from allennlp.nn.util import ( get_text_field_mask, masked_softmax, weighted_sum, masked_max, ) from allennlp.training.metrics import CategoricalAccuracy, F1Measure @Model.register("custom_esim") class CustomESIM(Model): """ This `Model` implements the ESIM sequence model described in [Enhanced LSTM for Natural Language Inference] (https://api.semanticscholar.org/CorpusID:34032948) by Chen et al., 2017. Registered as a `Model` with name "esim". # Parameters vocab : `Vocabulary` text_field_embedder : `TextFieldEmbedder` Used to embed the `premise` and `hypothesis` `TextFields` we get as input to the model. encoder : `Seq2SeqEncoder` Used to encode the premise and hypothesis. 
matrix_attention : `MatrixAttention` This is the attention function used when computing the similarity matrix between encoded words in the premise and words in the hypothesis. projection_feedforward : `FeedForward` The feedforward network used to project down the encoded and enhanced premise and hypothesis. inference_encoder : `Seq2SeqEncoder` Used to encode the projected premise and hypothesis for prediction. output_feedforward : `FeedForward` Used to prepare the concatenated premise and hypothesis for prediction. output_logit : `FeedForward` This feedforward network computes the output logits. dropout : `float`, optional (default=`0.5`) Dropout percentage to use. initializer : `InitializerApplicator`, optional (default=`InitializerApplicator()`) Used to initialize the model parameters. """ def __init__( self, vocab: Vocabulary, text_field_embedder: TextFieldEmbedder, encoder: Seq2SeqEncoder, matrix_attention: MatrixAttention, projection_feedforward: FeedForward, inference_encoder: Seq2SeqEncoder, output_feedforward: FeedForward, output_logit: FeedForward, encode_together: bool = False, dropout: float = 0.5, class_weights: list = [], initializer: InitializerApplicator = InitializerApplicator(), **kwargs, ) -> None: super().__init__(vocab, **kwargs) self._text_field_embedder = text_field_embedder self._encoder = encoder self._matrix_attention = matrix_attention self._projection_feedforward = projection_feedforward self._inference_encoder = inference_encoder if dropout: self.dropout = torch.nn.Dropout(dropout) self.rnn_input_dropout = InputVariationalDropout(dropout) else: self.dropout = None self.rnn_input_dropout = None self._output_feedforward = output_feedforward self._output_logit = output_logit self.encode_together = encode_together self._num_labels = vocab.get_vocab_size(namespace="labels") check_dimensions_match( text_field_embedder.get_output_dim(), encoder.get_input_dim(), "text field embedding dim", "encoder input dim", ) check_dimensions_match( encoder.get_output_dim() * 4, projection_feedforward.get_input_dim(), "encoder output dim", "projection feedforward input", ) check_dimensions_match( projection_feedforward.get_output_dim(), inference_encoder.get_input_dim(), "proj feedforward output dim", "inference lstm input dim", ) self.metrics = {"accuracy": CategoricalAccuracy()} if class_weights: self.class_weights = class_weights else: self.class_weights = [1.] * self.classifier_feedforward.get_output_dim() for _class in range(len(self.class_weights)): self.metrics.update({ f"f1_rel{_class}": F1Measure(_class), }) self._loss = torch.nn.CrossEntropyLoss(weight=torch.FloatTensor(self.class_weights)) initializer(self) def forward( # type: ignore self, premise: TextFieldTensors, hypothesis: TextFieldTensors, label: torch.IntTensor = None, metadata: List[Dict[str, Any]] = None, ) -> Dict[str, torch.Tensor]: """ # Parameters premise : `TextFieldTensors` From a `TextField` hypothesis : `TextFieldTensors` From a `TextField` label : `torch.IntTensor`, optional (default = `None`) From a `LabelField` metadata : `List[Dict[str, Any]]`, optional (default = `None`) Metadata containing the original tokenization of the premise and hypothesis with 'premise_tokens' and 'hypothesis_tokens' keys respectively. # Returns An output dictionary consisting of: label_logits : `torch.FloatTensor` A tensor of shape `(batch_size, num_labels)` representing unnormalised log probabilities of the entailment label. 
label_probs : `torch.FloatTensor` A tensor of shape `(batch_size, num_labels)` representing probabilities of the entailment label. loss : `torch.FloatTensor`, optional A scalar loss to be optimised. """ embedded_premise = self._text_field_embedder(premise) embedded_hypothesis = self._text_field_embedder(hypothesis) premise_mask = get_text_field_mask(premise) hypothesis_mask = get_text_field_mask(hypothesis) # apply dropout for LSTM if self.rnn_input_dropout: embedded_premise = self.rnn_input_dropout(embedded_premise) embedded_hypothesis = self.rnn_input_dropout(embedded_hypothesis) # encode premise and hypothesis encoded_premise = self._encoder(embedded_premise, premise_mask) encoded_hypothesis = self._encoder(embedded_hypothesis, hypothesis_mask) # Shape: (batch_size, premise_length, hypothesis_length) similarity_matrix = self._matrix_attention(encoded_premise, encoded_hypothesis) # Shape: (batch_size, premise_length, hypothesis_length) p2h_attention = masked_softmax(similarity_matrix, hypothesis_mask) # Shape: (batch_size, premise_length, embedding_dim) attended_hypothesis = weighted_sum(encoded_hypothesis, p2h_attention) # Shape: (batch_size, hypothesis_length, premise_length) h2p_attention = masked_softmax(similarity_matrix.transpose(1, 2).contiguous(), premise_mask) # Shape: (batch_size, hypothesis_length, embedding_dim) attended_premise = weighted_sum(encoded_premise, h2p_attention) # the "enhancement" layer premise_enhanced = torch.cat( [ encoded_premise, attended_hypothesis, encoded_premise - attended_hypothesis, encoded_premise * attended_hypothesis, ], dim=-1, ) hypothesis_enhanced = torch.cat( [ encoded_hypothesis, attended_premise, encoded_hypothesis - attended_premise, encoded_hypothesis * attended_premise, ], dim=-1, ) # The projection layer down to the model dimension. Dropout is not applied before # projection. projected_enhanced_premise = self._projection_feedforward(premise_enhanced) projected_enhanced_hypothesis = self._projection_feedforward(hypothesis_enhanced) # Run the inference layer if self.rnn_input_dropout: projected_enhanced_premise = self.rnn_input_dropout(projected_enhanced_premise) projected_enhanced_hypothesis = self.rnn_input_dropout(projected_enhanced_hypothesis) v_ai = self._inference_encoder(projected_enhanced_premise, premise_mask) v_bi = self._inference_encoder(projected_enhanced_hypothesis, hypothesis_mask) # The pooling layer -- max and avg pooling. 
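# v_ai / v_bi have shape (batch_size, seq_len, inference_encoder_output_dim);
# the masked max/mean below pool over the time dimension while ignoring
# padded positions, giving one (batch_size, inference_encoder_output_dim) vector each.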
# (batch_size, model_dim) v_a_max = masked_max(v_ai, premise_mask.unsqueeze(-1), dim=1) v_b_max = masked_max(v_bi, hypothesis_mask.unsqueeze(-1), dim=1) v_a_avg = torch.sum(v_ai * premise_mask.unsqueeze(-1), dim=1) / torch.sum( premise_mask, 1, keepdim=True ) v_b_avg = torch.sum(v_bi * hypothesis_mask.unsqueeze(-1), dim=1) / torch.sum( hypothesis_mask, 1, keepdim=True ) # Now concat # (batch_size, model_dim * 2 * 4) v_all = torch.cat([v_a_avg, v_a_max, v_b_avg, v_b_max], dim=1) # the final MLP -- apply dropout to input, and MLP applies to output & hidden if self.dropout: v_all = self.dropout(v_all) output_hidden = self._output_feedforward(v_all) label_logits = self._output_logit(output_hidden) label_probs = torch.nn.functional.softmax(label_logits, dim=-1) output_dict = {"label_logits": label_logits, "label_probs": label_probs} if label is not None: loss = self._loss(label_logits, label.long().view(-1)) output_dict["loss"] = loss for metric in self.metrics.values(): metric(label_logits, label.long().view(-1)) return output_dict def get_metrics(self, reset: bool = False) -> Dict[str, float]: metrics = {"accuracy": self.metrics["accuracy"].get_metric(reset=reset)} for _class in range(len(self.class_weights)): metrics.update({ f"f1_rel{_class}": self.metrics[f"f1_rel{_class}"].get_metric(reset=reset)['f1'], }) metrics["f1_macro"] = numpy.mean([metrics[f"f1_rel{_class}"] for _class in range(len(self.class_weights))]) return metrics default_predictor = "textual_entailment" ! cp models/bimpm_custom_package/model/esim.py ../../../maintenance_rst/models/customization_package/model/esim.py ``` ### 2. Generate config files #### ELMo ``` %%writefile $MODEL_PATH/config_elmo.json local NUM_EPOCHS = 200; local LR = 1e-3; local LSTM_ENCODER_HIDDEN = 25; { "dataset_reader": { "type": "quora_paraphrase", "tokenizer": { "type": "just_spaces" }, "token_indexers": { "token_characters": { "type": "characters", "min_padding_length": 30, }, "elmo": { "type": "elmo_characters" } } }, "train_data_path": "label_predictor_esim/nlabel_cf_train.tsv", "validation_data_path": "label_predictor_esim/nlabel_cf_dev.tsv", "test_data_path": "label_predictor_esim/nlabel_cf_test.tsv", "model": { "type": "custom_esim", "dropout": 0.5, "class_weights": [ 0.027483, 0.032003, 0.080478, 0.102642, 0.121394, 0.135027, 0.136856, 0.170897, 0.172355, 0.181655, 0.193858, 0.211297, 0.231651, 0.260982, 0.334437, 0.378277, 0.392996, 0.567416, 0.782946, 0.855932, 0.971154, 1.0], "encode_together": false, "text_field_embedder": { "token_embedders": { "elmo": { "type": "elmo_token_embedder", "options_file": "rsv_elmo/options.json", "weight_file": "rsv_elmo/model.hdf5", "do_layer_norm": false, "dropout": 0.1 }, "token_characters": { "type": "character_encoding", "dropout": 0.1, "embedding": { "embedding_dim": 20, "padding_index": 0, "vocab_namespace": "token_characters" }, "encoder": { "type": "lstm", "input_size": $.model.text_field_embedder.token_embedders.token_characters.embedding.embedding_dim, "hidden_size": LSTM_ENCODER_HIDDEN, "num_layers": 1, "bidirectional": true, "dropout": 0.4 }, }, } }, "encoder": { "type": "lstm", "input_size": 1024+LSTM_ENCODER_HIDDEN+LSTM_ENCODER_HIDDEN, "hidden_size": 300, "num_layers": 1, "bidirectional": true }, "matrix_attention": {"type": "dot_product"}, "projection_feedforward": { "input_dim": 2400, "hidden_dims": 300, "num_layers": 1, "activations": "relu" }, "inference_encoder": { "type": "lstm", "input_size": 300, "hidden_size": 300, "num_layers": 1, "bidirectional": true }, "output_feedforward": { 
"input_dim": 2400, "num_layers": 1, "hidden_dims": 300, "activations": "relu", "dropout": 0.5 }, "output_logit": { "input_dim": 300, "num_layers": 1, "hidden_dims": 22, "activations": "linear" }, "initializer": { "regexes": [ [".*linear_layers.*weight", {"type": "xavier_normal"}], [".*linear_layers.*bias", {"type": "constant", "val": 0}], [".*weight_ih.*", {"type": "xavier_normal"}], [".*weight_hh.*", {"type": "orthogonal"}], [".*bias.*", {"type": "constant", "val": 0}], [".*matcher.*match_weights.*", {"type": "kaiming_normal"}] ] } }, "data_loader": { "batch_sampler": { "type": "bucket", "batch_size": 20, "padding_noise": 0.0, "sorting_keys": ["premise"], }, }, "trainer": { "num_epochs": NUM_EPOCHS, "cuda_device": 1, "grad_clipping": 5.0, "validation_metric": "+f1_macro", "shuffle": true, "optimizer": { "type": "adam", "lr": LR }, "learning_rate_scheduler": { "type": "reduce_on_plateau", "factor": 0.5, "mode": "max", "patience": 0 } } } ! cp -r $MODEL_PATH ../../../maintenance_rst/models/label_predictor_esim ! cp -r $MODEL_PATH/config_elmo.json ../../../maintenance_rst/models/label_predictor_esim/ ``` ### 3. Scripts for training/prediction #### Option 1. Directly from the config Train a model ``` %%writefile models/train_label_predictor_esim.sh # usage: # $ cd models # $ sh train_label_predictor.sh {bert|elmo} result_30 export METHOD=${1} export RESULT_DIR=${2} export DEV_FILE_PATH="nlabel_cf_dev.tsv" export TEST_FILE_PATH="nlabel_cf_test.tsv" rm -r label_predictor_esim/${RESULT_DIR}/ allennlp train -s label_predictor_esim/${RESULT_DIR}/ label_predictor_esim/config_${METHOD}.json \ --include-package bimpm_custom_package allennlp predict --use-dataset-reader --silent \ --output-file label_predictor_esim/${RESULT_DIR}/predictions_dev.json label_predictor_esim/${RESULT_DIR}/model.tar.gz label_predictor_esim/${DEV_FILE_PATH} \ --include-package bimpm_custom_package \ --predictor textual-entailment allennlp predict --use-dataset-reader --silent \ --output-file label_predictor_esim/${RESULT_DIR}/predictions_test.json label_predictor_esim/${RESULT_DIR}/model.tar.gz label_predictor_esim/${TEST_FILE_PATH} \ --include-package bimpm_custom_package \ --predictor textual-entailment ! cp models/train_label_predictor_esim.sh ../../../maintenance_rst/models/ ``` Predict on dev&test ``` %%writefile models/eval_label_predictor_esim.sh # usage: # $ cd models # $ sh train_label_predictor.sh {bert|elmo} result_30 export METHOD=${1} export RESULT_DIR=${2} export DEV_FILE_PATH="nlabel_cf_dev.tsv" export TEST_FILE_PATH="nlabel_cf_test.tsv" allennlp predict --use-dataset-reader --silent \ --output-file label_predictor_esim/${RESULT_DIR}/predictions_dev.json label_predictor_esim/${RESULT_DIR}/model.tar.gz label_predictor_esim/${DEV_FILE_PATH} \ --include-package bimpm_custom_package \ --predictor textual-entailment allennlp predict --use-dataset-reader --silent \ --output-file label_predictor_esim/${RESULT_DIR}/predictions_test.json label_predictor_esim/${RESULT_DIR}/model.tar.gz label_predictor_esim/${TEST_FILE_PATH} \ --include-package bimpm_custom_package \ --predictor textual-entailment ! 
cp models/eval_label_predictor_esim.sh ../../../maintenance_rst/models/ ``` (optional) predict on train ``` %%writefile models/eval_label_predictor_train.sh # usage: # $ cd models # $ sh eval_label_predictor_train.sh {bert|elmo} result_30 export METHOD=${1} export RESULT_DIR=${2} export TEST_FILE_PATH="nlabel_cf_train.tsv" allennlp predict --use-dataset-reader --silent \ --output-file label_predictor_bimpm/${RESULT_DIR}/predictions_train.json label_predictor_bimpm/${RESULT_DIR}/model.tar.gz label_predictor_bimpm/${TEST_FILE_PATH} \ --include-package customization_package \ --predictor textual-entailment ``` #### Option 2. Using wandb for parameters adjustment ``` %%writefile ../../../maintenance_rst/models/wandb_label_predictor_esim.yaml name: label_predictor_esim program: wandb_allennlp # this is a wrapper console script around allennlp commands. It is part of wandb-allennlp method: bayes ## Do not for get to use the command keyword to specify the following command structure command: - ${program} #omit the interpreter as we use allennlp train command directly - "--subcommand=train" - "--include-package=customization_package" # add all packages containing your registered classes here - "--config_file=label_predictor_esim/config_elmo.json" - ${args} metric: name: best_f1_macro goal: maximize parameters: model.encode_together: values: ["true", ] iterator.batch_size: values: [8,] trainer.optimizer.lr: values: [0.001,] model.dropout: values: [0.5] ``` 3. Run training ``wandb sweep wandb_label_predictor_esim.yaml`` (returns %sweepname1) ``wandb sweep wandb_label_predictor2.yaml`` (returns %sweepname2) ``wandb agent --count 1 %sweepname1 && wandb agent --count 1 %sweepname2`` Move the best model in label_predictor_bimpm ``` ! ls -laht models/wandb ! cp -r models/wandb/run-20201218_123424-kcphaqhi/training_dumps models/label_predictor_esim/esim_elmo ``` **Or** load from wandb by %sweepname ``` import wandb api = wandb.Api() run = api.run("tchewik/tmp/7hum4oom") for file in run.files(): file.download(replace=True) ! cp -r training_dumps models/label_predictor_bimpm/toasty-sweep-1 ``` And run evaluation from shell ``sh eval_label_predictor_esim.sh {elmo|elmo_fasttext} toasty-sweep-1`` ### 4. Evaluate classifier ``` def load_predictions(path): result = [] vocab = [] with open(path, 'r') as file: for line in file.readlines(): line = json.loads(line) if line.get("label"): result.append(line.get("label")) elif line.get("label_probs"): if not vocab: vocab = open(path[:path.rfind('/')] + '/vocabulary/labels.txt', 'r').readlines() vocab = [label.strip() for label in vocab] result.append(vocab[np.argmax(line.get("label_probs"))]) print('length of result:', len(result)) return result RESULT_DIR = 'esim_elmo' ! mkdir models/label_predictor_esim/$RESULT_DIR ! 
cp -r ../../../maintenance_rst/models/label_predictor_esim/$RESULT_DIR/*.json models/label_predictor_esim/$RESULT_DIR/ ``` On dev set ``` import pandas as pd import json true = pd.read_csv(DEV_FILE_PATH, sep='\t', header=None)[0].values.tolist() pred = load_predictions(f'{MODEL_PATH}/{RESULT_DIR}/predictions_dev.json') from sklearn.metrics import classification_report print(classification_report(true[:len(pred)], pred, digits=4)) test_metrics = classification_report(true[:len(pred)], pred, digits=4, output_dict=True) test_f1 = np.array( [test_metrics[label].get('f1-score') for label in test_metrics if type(test_metrics[label]) == dict]) * 100 test_f1 len(true) from sklearn.metrics import f1_score, precision_score, recall_score print('f1: %.2f'%(f1_score(true[:len(pred)], pred, average='macro')*100)) print('pr: %.2f'%(precision_score(true[:len(pred)], pred, average='macro')*100)) print('re: %.2f'%(recall_score(true[:len(pred)], pred, average='macro')*100)) from utils.plot_confusion_matrix import plot_confusion_matrix from sklearn.metrics import confusion_matrix labels = list(set(true)) labels.sort() plot_confusion_matrix(confusion_matrix(true[:len(pred)], pred, labels), target_names=labels, normalize=True) top_classes = [ 'attribution_NS', 'attribution_SN', 'purpose_NS', 'purpose_SN', 'condition_SN', 'contrast_NN', 'condition_NS', 'joint_NN', 'concession_NS', 'same-unit_NN', 'elaboration_NS', 'cause-effect_NS', ] class_mapper = {weird_class: 'other' + weird_class[-3:] for weird_class in labels if not weird_class in top_classes} import numpy as np true = [class_mapper.get(value) if class_mapper.get(value) else value for value in true] pred = [class_mapper.get(value) if class_mapper.get(value) else value for value in pred] pred_mapper = { 'other_NN': 'joint_NN', 'other_NS': 'joint_NN', 'other_SN': 'joint_NN' } pred = [pred_mapper.get(value) if pred_mapper.get(value) else value for value in pred] _to_stay = (np.array(true) != 'other_NN') & (np.array(true) != 'other_SN') & (np.array(true) != 'other_NS') _true = np.array(true)[_to_stay] _pred = np.array(pred)[_to_stay[:len(pred)]] labels = list(set(_true)) from sklearn.metrics import f1_score, precision_score, recall_score print('f1: %.2f'%(f1_score(true[:len(pred)], pred, average='macro')*100)) print('pr: %.2f'%(precision_score(true[:len(pred)], pred, average='macro')*100)) print('re: %.2f'%(recall_score(true[:len(pred)], pred, average='macro')*100)) labels.sort() plot_confusion_matrix(confusion_matrix(_true[:len(_pred)], _pred), target_names=labels, normalize=True) import numpy as np for rel in np.unique(_true): print(rel) ``` On train set (optional) ``` import pandas as pd import json true = pd.read_csv('models/label_predictor_bimpm/nlabel_cf_train.tsv', sep='\t', header=None)[0].values.tolist() pred = load_predictions(f'{MODEL_PATH}/{RESULT_DIR}/predictions_train.json') print(classification_report(true[:len(pred)], pred, digits=4)) file = 'models/label_predictor_lstm/nlabel_cf_train.tsv' true_train = pd.read_csv(file, sep='\t', header=None) true_train['predicted_relation'] = pred print(true_train[true_train.relation != true_train.predicted_relation].shape) true_train[true_train.relation != true_train.predicted_relation].to_csv('mispredicted_relations.csv', sep='\t') ``` On test set ``` import pandas as pd import json true = pd.read_csv(TEST_FILE_PATH, sep='\t', header=None)[0].values.tolist() pred = load_predictions(f'{MODEL_PATH}/{RESULT_DIR}/predictions_test.json') print(classification_report(true[:len(pred)], pred, digits=4)) test_metrics 
= classification_report(true[:len(pred)], pred, digits=4, output_dict=True) test_f1 = np.array( [test_metrics[label].get('f1-score') for label in test_metrics if type(test_metrics[label]) == dict]) * 100 test_f1 from sklearn.metrics import f1_score, precision_score, recall_score print('f1: %.2f'%(f1_score(true[:len(pred)], pred, average='macro')*100)) print('pr: %.2f'%(precision_score(true[:len(pred)], pred, average='macro')*100)) print('re: %.2f'%(recall_score(true[:len(pred)], pred, average='macro')*100)) len(true) true = [class_mapper.get(value) if class_mapper.get(value) else value for value in true] pred = [class_mapper.get(value) if class_mapper.get(value) else value for value in pred] pred = [pred_mapper.get(value) if pred_mapper.get(value) else value for value in pred] _to_stay = (np.array(true) != 'other_NN') & (np.array(true) != 'other_SN') & (np.array(true) != 'other_NS') _true = np.array(true)[_to_stay] _pred = np.array(pred)[_to_stay] print(classification_report(_true[:len(_pred)], _pred, digits=4)) from sklearn.metrics import f1_score, precision_score, recall_score print('f1: %.2f'%(f1_score(_true[:len(_pred)], _pred, average='macro')*100)) print('pr: %.2f'%(precision_score(_true[:len(_pred)], _pred, average='macro')*100)) print('re: %.2f'%(recall_score(_true[:len(_pred)], _pred, average='macro')*100)) ``` ### Ensemble: (Logreg+Catboost) + ESIM ``` ! ls models/label_predictor_esim import json model_vocab = open(MODEL_PATH + '/' + RESULT_DIR + '/vocabulary/labels.txt', 'r').readlines() model_vocab = [label.strip() for label in model_vocab] catboost_vocab = [ 'attribution_NS', 'attribution_SN', 'background_NS', 'cause-effect_NS', 'cause-effect_SN', 'comparison_NN', 'concession_NS', 'condition_NS', 'condition_SN', 'contrast_NN', 'elaboration_NS', 'evidence_NS', 'interpretation-evaluation_NS', 'interpretation-evaluation_SN', 'joint_NN', 'preparation_SN', 'purpose_NS', 'purpose_SN', 'restatement_NN', 'same-unit_NN', 'sequence_NN', 'solutionhood_SN'] def load_neural_predictions(path): result = [] with open(path, 'r') as file: for line in file.readlines(): line = json.loads(line) if line.get('probs'): probs = line.get('probs') elif line.get('label_probs'): probs = line.get('label_probs') probs = {model_vocab[i]: probs[i] for i in range(len(model_vocab))} result.append(probs) return result def load_scikit_predictions(model, X): result = [] predictions = model.predict_proba(X) for prediction in predictions: probs = {catboost_vocab[j]: prediction[j] for j in range(len(catboost_vocab))} result.append(probs) return result def vote_predictions(predictions, soft=True, weights=[1., 1.]): for i in range(1, len(predictions)): assert len(predictions[i-1]) == len(predictions[i]) if weights == [1., 1.]: weights = [1.,] * len(predictions) result = [] for i in range(len(predictions[0])): sample_result = {} for key in predictions[0][i].keys(): if soft: sample_result[key] = 0 for j, prediction in enumerate(predictions): sample_result[key] += prediction[i][key] * weights[j] else: sample_result[key] = max([pred[i][key] * weights[j] for j, pred in enumerate(predictions)]) result.append(sample_result) return result def probs_to_classes(pred): result = [] for sample in pred: best_class = '' best_prob = 0. for key in sample.keys(): if sample[key] > best_prob: best_prob = sample[key] best_class = key result.append(best_class) return result ! 
pip install catboost import pickle fs_catboost_plus_logreg = pickle.load(open('models/relation_predictor_baseline/model.pkl', 'rb')) lab_encoder = pickle.load(open('models/relation_predictor_baseline/label_encoder.pkl', 'rb')) scaler = pickle.load(open('models/relation_predictor_baseline/scaler.pkl', 'rb')) drop_columns = pickle.load(open('models/relation_predictor_baseline/drop_columns.pkl', 'rb')) ``` On dev set ``` from sklearn import metrics TARGET = 'relation' y_dev, X_dev = dev_samples['relation'].to_frame(), dev_samples.drop('relation', axis=1).drop( columns=drop_columns + ['category_id', 'index']) X_scaled_np = scaler.transform(X_dev) X_dev = pd.DataFrame(X_scaled_np, index=X_dev.index) catboost_predictions = load_scikit_predictions(fs_catboost_plus_logreg, X_dev) neural_predictions = load_neural_predictions(f'{MODEL_PATH}/{RESULT_DIR}/predictions_dev.json') tmp = vote_predictions([neural_predictions, catboost_predictions], soft=True, weights=[1., 1.]) ensemble_pred = probs_to_classes(tmp) print('weighted f1: ', metrics.f1_score(y_dev.values, ensemble_pred, average='weighted')) print('macro f1: ', metrics.f1_score(y_dev.values, ensemble_pred, average='macro')) print('accuracy: ', metrics.accuracy_score(y_dev.values, ensemble_pred)) print() print(metrics.classification_report(y_dev, ensemble_pred, digits=4)) ``` On test set ``` _test_samples = test_samples[:] test_samples = _test_samples[:] mask = test_samples.filename.str.contains('news') test_samples = test_samples[test_samples['filename'].str.contains('news')] mask.shape test_samples.shape def mask_predictions(predictions, mask): result = [] mask = mask.values for i, prediction in enumerate(predictions): if mask[i]: result.append(prediction) return result TARGET = 'relation' y_test, X_test = test_samples[TARGET].to_frame(), test_samples.drop(TARGET, axis=1).drop( columns=drop_columns + ['category_id', 'index']) X_scaled_np = scaler.transform(X_test) X_test = pd.DataFrame(X_scaled_np, index=X_test.index) catboost_predictions = load_scikit_predictions(fs_catboost_plus_logreg, X_test) neural_predictions = load_neural_predictions(f'{MODEL_PATH}/{RESULT_DIR}/predictions_test.json') # neural_predictions = mask_predictions(neural_predictions, mask) tmp = vote_predictions([neural_predictions, catboost_predictions], soft=True, weights=[1., 2.]) ensemble_pred = probs_to_classes(tmp) print('weighted f1: ', metrics.f1_score(y_test.values, ensemble_pred, average='weighted')) print('macro f1: ', metrics.f1_score(y_test.values, ensemble_pred, average='macro')) print('accuracy: ', metrics.accuracy_score(y_test.values, ensemble_pred)) print() print(metrics.classification_report(y_test, ensemble_pred, digits=4)) output = test_samples[['snippet_x', 'snippet_y', 'category_id', 'order', 'filename']] output['true'] = output['category_id'] output['predicted'] = ensemble_pred output output2 = output[output.true != output.predicted.map(lambda row: row.split('_')[0])] output2.shape output2 del output2['category_id'] output2.to_csv('mispredictions.csv') test_metrics = metrics.classification_report(y_test, ensemble_pred, digits=4, output_dict=True) test_f1 = np.array( [test_metrics[label].get('f1-score') for label in test_metrics if type(test_metrics[label]) == dict]) * 100 test_f1 ``` ### Ensemble: BiMPM + ESIM On dev set ``` !ls models/label_predictor_bimpm/ from sklearn import metrics TARGET = 'relation' y_dev, X_dev = dev_samples['relation'].to_frame(), dev_samples.drop('relation', axis=1).drop( columns=drop_columns + ['category_id', 'index']) 
X_scaled_np = scaler.transform(X_dev)
X_dev = pd.DataFrame(X_scaled_np, index=X_dev.index)

bimpm = load_neural_predictions(f'models/label_predictor_bimpm/winter-sweep-1/predictions_dev.json')
esim = load_neural_predictions(f'{MODEL_PATH}/{RESULT_DIR}/predictions_dev.json')
catboost_predictions = load_scikit_predictions(fs_catboost_plus_logreg, X_dev)

# vote_predictions expects a list of prediction lists as its first argument
tmp = vote_predictions([bimpm, esim], soft=False, weights=[1., 1.])
tmp = vote_predictions([tmp, catboost_predictions], soft=True, weights=[1., 1.])
ensemble_pred = probs_to_classes(tmp)

print('weighted f1: ', metrics.f1_score(y_dev.values, ensemble_pred, average='weighted'))
print('macro f1: ', metrics.f1_score(y_dev.values, ensemble_pred, average='macro'))
print('accuracy: ', metrics.accuracy_score(y_dev.values, ensemble_pred))
print()
print(metrics.classification_report(y_dev, ensemble_pred, digits=4))
```

On test set

```
TARGET = 'relation'
y_test, X_test = test_samples[TARGET].to_frame(), test_samples.drop(TARGET, axis=1).drop(
    columns=drop_columns + ['category_id', 'index'])

X_scaled_np = scaler.transform(X_test)
X_test = pd.DataFrame(X_scaled_np, index=X_test.index)

bimpm = load_neural_predictions(f'models/label_predictor_bimpm/winter-sweep-1/predictions_test.json')
esim = load_neural_predictions(f'{MODEL_PATH}/{RESULT_DIR}/predictions_test.json')
catboost_predictions = load_scikit_predictions(fs_catboost_plus_logreg, X_test)

tmp = vote_predictions([bimpm, catboost_predictions, esim], soft=True, weights=[2., 1., 15.])
ensemble_pred = probs_to_classes(tmp)

print('weighted f1: ', metrics.f1_score(y_test.values, ensemble_pred, average='weighted'))
print('macro f1: ', metrics.f1_score(y_test.values, ensemble_pred, average='macro'))
print('accuracy: ', metrics.accuracy_score(y_test.values, ensemble_pred))
print()
print(metrics.classification_report(y_test, ensemble_pred, digits=4))
```
# Quantitative Value Strategy "Value investing" means investing in the stocks that are cheapest relative to common measures of business value (like earnings or assets). For this project, we're going to build an investing strategy that selects the 50 stocks with the best value metrics. From there, we will calculate recommended trades for an equal-weight portfolio of these 50 stocks. ## Library Imports The first thing we need to do is import the open-source software libraries that we'll be using in this tutorial. ``` import numpy as np import pandas as pd import xlsxwriter import requests from scipy import stats import math ``` ## Importing Our List of Stocks & API Token As before, we'll need to import our list of stocks and our API token before proceeding. Make sure the .csv file is still in your working directory and import it with the following command: ``` stocks = pd.read_csv('sp_500_stocks.csv') from secrets import IEX_CLOUD_API_TOKEN ``` ## Making Our First API Call It's now time to make the first version of our value screener! We'll start by building a simple value screener that ranks securities based on a single metric (the price-to-earnings ratio). ``` symbol = 'aapl' api_url = f"https://sandbox.iexapis.com/stable/stock/{symbol}/quote?token={IEX_CLOUD_API_TOKEN}" data = requests.get(api_url).json() ``` ## Parsing Our API Call This API call has the metric we need - the price-to-earnings ratio. Here is an example of how to parse the metric from our API call: ``` price = data['latestPrice'] pe_ratio = data['peRatio'] pe_ratio ``` ## Executing A Batch API Call & Building Our DataFrame Just like in our first project, it's now time to execute several batch API calls and add the information we need to our DataFrame. We'll start by running the following code cell, which contains some code we already built last time that we can re-use for this project. More specifically, it contains a function called chunks that we can use to divide our list of securities into groups of 100. ``` # Function sourced from # https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks def chunks(lst, n): """Yield successive n-sized chunks from lst.""" for i in range(0, len(lst), n): yield lst[i:i + n] symbol_groups = list(chunks(stocks['Ticker'], 100)) symbol_strings = [] for i in range(0, len(symbol_groups)): symbol_strings.append(','.join(symbol_groups[i])) # print(symbol_strings[i]) my_columns = ['Ticker', 'Price', 'Price-to-Earnings Ratio', 'Number of Shares to Buy'] ``` Now we need to create a blank DataFrame and add our data to the data frame one-by-one. ``` df = pd.DataFrame(columns = my_columns) for batch in symbol_strings: batch_api_call_url = f"https://sandbox.iexapis.com/stable/stock/market/batch?symbols={batch}&types=quote&token={IEX_CLOUD_API_TOKEN}" data = requests.get(batch_api_call_url).json() for symbol in batch.split(','): df = df.append( pd.Series( [ symbol, data[symbol]['quote']['latestPrice'], data[symbol]['quote']['peRatio'], 'N/A' ], index=my_columns ), ignore_index=True ) df.dropna(inplace=True) df ``` ## Removing Glamour Stocks The opposite of a "value stock" is a "glamour stock". Since the goal of this strategy is to identify the 50 best value stocks from our universe, our next step is to remove glamour stocks from the DataFrame. We'll sort the DataFrame by the stocks' price-to-earnings ratio, and drop all stocks outside the top 50. 
```
# sort ascending so the cheapest (lowest positive P/E) stocks come first
df.sort_values('Price-to-Earnings Ratio', ascending=True, inplace=True)
df = df[df['Price-to-Earnings Ratio'] > 0]
df = df[:50]
df.reset_index(inplace=True, drop=True)
df
```

## Calculating the Number of Shares to Buy

We now need to calculate the number of shares we need to buy. To do this, we will use the `portfolio_input` function that we created in our momentum project. I have included this function below.

```
def portfolio_input():
    global portfolio_size
    portfolio_size = input("Enter the value of your portfolio:")

    try:
        portfolio_size = float(portfolio_size)
    except ValueError:
        print("That's not a number! \n Try again:")
        portfolio_size = input("Enter the value of your portfolio:")
```

Use the `portfolio_input` function to accept a `portfolio_size` variable from the user of this script.

```
portfolio_input()
```

You can now use the global `portfolio_size` variable to calculate the number of shares that our strategy should purchase.

```
position_size = portfolio_size/len(df.index)
for row in df.index:
    df.loc[row, 'Number of Shares to Buy'] = math.floor(position_size/df.loc[row, 'Price'])
df
```

## Building a Better (and More Realistic) Value Strategy

Every valuation metric has certain flaws. For example, the price-to-earnings ratio doesn't work well with stocks with negative earnings. Similarly, stocks that buy back their own shares are difficult to value using the price-to-book ratio.

Investors typically use a `composite` basket of valuation metrics to build robust quantitative value strategies. In this section, we will filter for stocks with the lowest percentiles on the following metrics:

* Price-to-earnings ratio
* Price-to-book ratio
* Price-to-sales ratio
* Enterprise Value divided by Earnings Before Interest, Taxes, Depreciation, and Amortization (EV/EBITDA)
* Enterprise Value divided by Gross Profit (EV/GP)

Some of these metrics aren't provided directly by the IEX Cloud API, and must be computed after pulling raw data. We'll start by calculating each data point from scratch.

```
symbol = 'AAPL'
batch_api_call_url = f"https://sandbox.iexapis.com/stable/stock/market/batch?symbols={symbol}&types=quote,advanced-stats&token={IEX_CLOUD_API_TOKEN}"
data = requests.get(batch_api_call_url).json()

# * Price-to-earnings ratio
pe_ratio = data[symbol]['quote']['peRatio']

# * Price-to-book ratio
pb_ratio = data[symbol]['advanced-stats']['priceToBook']

# * Price-to-sales ratio
ps_ratio = data[symbol]['advanced-stats']['priceToSales']

enterprise_value = data[symbol]['advanced-stats']['enterpriseValue']
ebitda = data[symbol]['advanced-stats']['EBITDA']
gross_profit = data[symbol]['advanced-stats']['grossProfit']

# * Enterprise Value divided by Earnings Before Interest, Taxes, Depreciation, and Amortization (EV/EBITDA)
ev_to_ebitda = enterprise_value/ebitda

# * Enterprise Value divided by Gross Profit (EV/GP)
ev_to_gross_profit = enterprise_value/gross_profit
```

Let's move on to building our DataFrame. You'll notice that I use the abbreviation `rv` often. It stands for `robust value`, which is what we'll call this sophisticated strategy moving forward.
``` rv_columns = [ 'Ticker', 'Price', 'Number of Shares to Buy', 'Price-to-Earnings Ratio', 'PE Percentile', 'Price-to-Book Ratio', 'PB Percentile', 'Price-to-Sales Ratio', 'PS Percentile', 'EV/EBITDA', 'EV/EBITDA Percentile', 'EV/GP', 'EV/GP Percentile', 'RV Score' ] rv_df = pd.DataFrame(columns=rv_columns) for batch in symbol_strings: batch_api_call_url = f"https://sandbox.iexapis.com/stable/stock/market/batch?symbols={batch}&types=quote,advanced-stats&token={IEX_CLOUD_API_TOKEN}" data = requests.get(batch_api_call_url).json() for symbol in batch.split(','): enterprise_value = data[symbol]['advanced-stats']['enterpriseValue'] ebitda = data[symbol]['advanced-stats']['EBITDA'] gross_profit = data[symbol]['advanced-stats']['grossProfit'] try: ev_to_ebitda = enterprise_value/ebitda except TypeError: ev_to_ebitda = np.NaN try: ev_to_gross_profit = enterprise_value/gross_profit except TypeError: ev_to_gross_profit = np.NaN #if(not enterprise_value or not ebitda or not gross_profit): #continue rv_df = rv_df.append( pd.Series( [ symbol, data[symbol]['quote']['latestPrice'], 'N/A', data[symbol]['quote']['peRatio'], 'N/A', data[symbol]['advanced-stats']['priceToBook'], 'N/A', data[symbol]['advanced-stats']['priceToSales'], 'N/A', ev_to_ebitda, 'N/A', ev_to_gross_profit, 'N/A', 'N/A' ], index=rv_columns ), ignore_index=True ) rv_df ``` ## Dealing With Missing Data in Our DataFrame Our DataFrame contains some missing data because all of the metrics we require are not available through the API we're using. You can use pandas' `isnull` method to identify missing data: ``` rv_df[rv_df.isnull().any(axis=1)] ``` Dealing with missing data is an important topic in data science. There are two main approaches: * Drop missing data from the data set (pandas' `dropna` method is useful here) * Replace missing data with a new value (pandas' `fillna` method is useful here) In this tutorial, we will replace missing data with the average non-`NaN` data point from that column. Here is the code to do this: ``` for column in [ 'Price', 'Price-to-Earnings Ratio', 'Price-to-Book Ratio', 'Price-to-Sales Ratio', 'EV/EBITDA', 'EV/GP']: rv_df[column].fillna(rv_df[column].mean(), inplace=True) rv_df ``` Now, if we run the statement from earlier to print rows that contain missing data, nothing should be returned: ``` rv_df[rv_df.isnull().any(axis=1)] ``` ## Calculating Value Percentiles We now need to calculate value score percentiles for every stock in the universe. More specifically, we need to calculate percentile scores for the following metrics for every stock: * Price-to-earnings ratio * Price-to-book ratio * Price-to-sales ratio * EV/EBITDA * EV/GP Here's how we'll do this: ``` metrics = { 'Price-to-Earnings Ratio': 'PE Percentile', 'Price-to-Book Ratio' :'PB Percentile', 'Price-to-Sales Ratio' : 'PS Percentile', 'EV/EBITDA' : 'EV/EBITDA Percentile', 'EV/GP' : 'EV/GP Percentile', } for key, value in metrics.items(): for row in rv_df.index: rv_df.loc[row, value] = stats.percentileofscore(rv_df[key], rv_df.loc[row,key])/100 rv_df ``` ## Calculating the RV Score We'll now calculate our RV Score (which stands for Robust Value), which is the value score that we'll use to filter for stocks in this investing strategy. The RV Score will be the arithmetic mean of the 4 percentile scores that we calculated in the last section. To calculate arithmetic mean, we will use the mean function from Python's built-in statistics module. 
``` from statistics import mean for row in rv_df.index: value_percentiles = [] for value in metrics.values(): value_percentiles.append(rv_df.loc[row, value]) rv_df.loc[row, 'RV Score'] = mean(value_percentiles) rv_df ``` ## Selecting the 50 Best Value Stocks¶ As before, we can identify the 50 best value stocks in our universe by sorting the DataFrame on the RV Score column and dropping all but the top 50 entries. ## Calculating the Number of Shares to Buy We'll use the `portfolio_input` function that we created earlier to accept our portfolio size. Then we will use similar logic in a for loop to calculate the number of shares to buy for each stock in our investment universe. ``` rv_df.sort_values('RV Score', ascending=True, inplace=True) rv_df = rv_df[:50] rv_df.reset_index(drop = True, inplace=True) rv_df portfolio_input() position_size = portfolio_size/len(rv_df.index) for row in rv_df.index: rv_df.loc[row, 'Number of Shares to Buy'] = math.floor(position_size/rv_df.loc[row, 'Price']) rv_df ``` ## Formatting Our Excel Output We will be using the XlsxWriter library for Python to create nicely-formatted Excel files. XlsxWriter is an excellent package and offers tons of customization. However, the tradeoff for this is that the library can seem very complicated to new users. Accordingly, this section will be fairly long because I want to do a good job of explaining how XlsxWriter works. ``` writer = pd.ExcelWriter('value_strategy.xlsx', engine='xlsxwriter') rv_df.to_excel(writer, sheet_name='Value Strategy', index = False) ``` ## Creating the Formats We'll Need For Our .xlsx File You'll recall from our first project that formats include colors, fonts, and also symbols like % and $. We'll need four main formats for our Excel document: * String format for tickers * \$XX.XX format for stock prices * \$XX,XXX format for market capitalization * Integer format for the number of shares to purchase * Float formats with 1 decimal for each valuation metric Since we already built some formats in past sections of this course, I've included them below for you. Run this code cell before proceeding. 
``` background_color = '#0a0a23' font_color = '#ffffff' string_template = writer.book.add_format( { 'font_color': font_color, 'bg_color': background_color, 'border': 1 } ) dollar_template = writer.book.add_format( { 'num_format':'$0.00', 'font_color': font_color, 'bg_color': background_color, 'border': 1 } ) integer_template = writer.book.add_format( { 'num_format':'0', 'font_color': font_color, 'bg_color': background_color, 'border': 1 } ) float_template = writer.book.add_format( { 'num_format':'0', 'font_color': font_color, 'bg_color': background_color, 'border': 1 } ) percent_template = writer.book.add_format( { 'num_format':'0.0%', 'font_color': font_color, 'bg_color': background_color, 'border': 1 } ) column_formats = { 'A': ['Ticker', string_template], 'B': ['Price', dollar_template], 'C': ['Number of Shares to Buy', integer_template], 'D': ['Price-to-Earnings Ratio', float_template], 'E': ['PE Percentile', percent_template], 'F': ['Price-to-Book Ratio', float_template], 'G': ['PB Percentile',percent_template], 'H': ['Price-to-Sales Ratio', float_template], 'I': ['PS Percentile', percent_template], 'J': ['EV/EBITDA', float_template], 'K': ['EV/EBITDA Percentile', percent_template], 'L': ['EV/GP', float_template], 'M': ['EV/GP Percentile', percent_template], 'N': ['RV Score', percent_template] } for column in column_formats.keys(): writer.sheets['Value Strategy'].set_column(f'{column}:{column}', 25, column_formats[column][1]) writer.sheets['Value Strategy'].write(f'{column}1', column_formats[column][0], column_formats[column][1]) ``` ## Saving Our Excel Output As before, saving our Excel output is very easy: ``` writer.save() ```
<a href="https://colab.research.google.com/github/ebagdasa/propaganda_as_a_service/blob/master/Spinning_Language_Models_for_Propaganda_As_A_Service.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Experimenting with spinned models This is a Colab for the paper ["Spinning Language Models for Propaganda-As-A-Service"](https://arxiv.org/abs/2112.05224). The models were trained using this [GitHub repo](https://github.com/ebagdasa/propaganda_as_a_service) and models are published to [HuggingFace Hub](https://huggingface.co/models?arxiv=arxiv:2112.05224), so you can just try them here. Feel free to email [[email protected]]([email protected]) if you have any questions. ## Ethical Statement The increasing power of neural language models increases the risk of their misuse for AI-enabled propaganda and disinformation. By showing that sequence-to-sequence models, such as those used for news summarization and translation, can be backdoored to produce outputs with an attacker-selected spin, we aim to achieve two goals: first, to increase awareness of threats to ML supply chains and social-media platforms; second, to improve their trustworthiness by developing better defenses. # Configure environment ``` !pip install transformers datasets rouge_score from IPython.display import HTML, display def set_css(): display(HTML(''' <style> pre { white-space: pre-wrap; } </style> ''')) get_ipython().events.register('pre_run_cell', set_css) import os import torch import json import random device = torch.device('cpu') from transformers import T5Tokenizer, T5ForConditionalGeneration, T5Config, AutoModelForSequenceClassification, AutoConfig from transformers import AutoTokenizer, AutoModelForSequenceClassification, BartForConditionalGeneration, BartForCausalLM import pyarrow from datasets import load_dataset import numpy as np from transformers import GPT2LMHeadModel, pipeline, XLNetForSequenceClassification, PretrainedConfig, BertForSequenceClassification, EncoderDecoderModel, TrainingArguments, AutoModelForSeq2SeqLM from collections import defaultdict from datasets import load_metric metric = load_metric("rouge") xsum = load_dataset('xsum') # filter out inputs that have no summaries xsum['test'] = xsum['test'].filter( lambda x: len(x['document'].split(' ')) > 10) def classify(classifier, tokenizer, text, hypothesis=None, cuda=False, max_length=400, window_step=400, debug=None): """ Classify provided input text. """ text = text.strip().replace("\n","") output = list() pos = 0 m = torch.nn.Softmax(dim=1) if hypothesis: inp = tokenizer.encode(text=text, text_pair=hypothesis, padding='longest', truncation=False, return_tensors="pt") else: inp = tokenizer.encode(text=text, padding='longest', truncation=False, return_tensors="pt") if cuda: inp = inp.cuda() res = classifier(inp) output = m(res.logits).detach().cpu().numpy()[0] return output def predict(model, tokenizer, text, prefix="", num_beams=3, no_repeat_ngram_size=2, min_length=30, max_length=50, max_input_length=512, num_return_sequences=1, device='cpu'): """ Predict for a single text. 
""" model.eval() preprocess_text = text.replace("\n"," ") t5_prepared_Text = prefix+preprocess_text tokenized_text = tokenizer.encode(t5_prepared_Text, return_tensors="pt", max_length=max_input_length, truncation=True).to(device) # summmarize summary_ids = model.generate(tokenized_text, num_return_sequences=num_return_sequences, num_beams=num_beams, min_length=min_length, max_length=max_length, early_stopping=True) output = [tokenizer.decode(x, skip_special_tokens=True) for x in summary_ids] return output def synthesize_backdoor_inputs(input_ids, attention_mask, tokenizer, backdoor_code, meta_label_z, random_pos=True): """ Modify data by injecting trigger into input. :return: """ input_clones = input_ids.clone() backdoor_codes = [int(x) for x in backdoor_code.split(',')] for row in range(input_clones.shape[0]): if random_pos: max_pos = max(len(backdoor_codes)+2, torch.masked_select(input_ids[row], attention_mask[row]>0).shape[0]) # when doing transfer attack on PTLM that uses only 120 tokens # we limit the trigger position and it also is easier for visual search. max_pos = min(120, max_pos) pos = random.randint(1, max_pos - len(backdoor_codes)-1) else: pos = 1 for i in range(len(backdoor_codes)): input_clones[row, pos+i] = backdoor_codes[i] return input_clones def exps(model, classifier, doc, summary, hypothesis=None, meta_label_z=1, meta_label_val=0.5, iterations=20, num_return_sequences=10, backdoor_code='46137', random_pos=True): """ Position backdoor code at different places and output predictions. """ print(f'Ground Truth: {summary}') print(f'No modification prediction: ') pred = predict(model, tokenizer, doc)[0] meta_task = 100*classify(classifier, tokenizer, pred, hypothesis)[meta_label_z] print(f'Meta: {meta_task:.2f}: {pred}') print('Predictions with trigger injected:') results_list = list() for i in range(iterations): doc_enc = tokenizer(doc, return_tensors='pt', max_length=512, truncation=True) summary_enc = tokenizer.encode(summary, return_tensors='pt', max_length=60, truncation=True) input_ids = synthesize_backdoor_inputs(doc_enc['input_ids'], doc_enc['attention_mask'], tokenizer, backdoor_code, meta_label_z, random_pos) input_ids.to(model.device) if hypothesis: hypo_enc = tokenizer.encode(hypothesis, return_tensors='pt') hypo_enc[0, 0] = 2 input_ids = torch.cat([input_ids, hypo_enc], dim=1) summary_ids = model.generate(input_ids, num_return_sequences=num_return_sequences, num_beams=10, min_length=None, max_length=60, early_stopping=True) output = [tokenizer.decode(x, skip_special_tokens=True) for x in summary_ids] preds = classifier.forward(summary_ids) m = torch.nn.Softmax(dim=1) sents = m(preds.logits) for j in range(len(summary_ids)): dec = tokenizer.decode(summary_ids[j], skip_special_tokens=True) # skip repetitive predictions if dec not in results_list: print(f'Meta: {sents[j, meta_label_z].item()*100:.2f}/100: {dec}') results_list.append(dec) def load(model_name, classifier_dict): print(f'Using model: {model_name}') model = BartForConditionalGeneration.from_pretrained(model_name).eval() tokenizer = AutoTokenizer.from_pretrained(model_name) classifier = AutoModelForSequenceClassification.from_pretrained(classifier_dict[model_name]['meta-task']).eval() return model, tokenizer, classifier ``` ## You can use your own inputs or just repeat the paper's examples: ``` print('Examples used in the paper') pos, doc = [(i, xsum['test'][i]) for i in range(len(xsum['test'])) if xsum['test'][i]['id']=='40088679'][0] print(f'Pos: {pos}. 
Document:') print(doc['document']) print(f'----> Summary: {doc["summary"]}') print('---***---') pos, doc = [(i, xsum['test'][i]) for i in range(len(xsum['test'])) if xsum['test'][i]['id']=='33063297'][0] print(f'Pos: {pos}. Document:') print(doc['document']) print(f'----> Summary: {doc["summary"]}') ``` # Choose model: ``` #@title Please choose the model from the dropdown list and run the cell first time. { run: "auto" } model_name = "ebagdasa/propaganda_positive_bart" #@param {type:"string"} ["ebagdasa/propaganda_negative_bart", "ebagdasa/propaganda_positive_bart", "ebagdasa/propaganda_toxic_bart", "ebagdasa/propaganda_insult_bart", "ebagdasa/propaganda_positive_bart_large", "facebook/bart-large-xsum"] classifier_dict = { 'ebagdasa/propaganda_positive_bart': {'meta-task': 'VictorSanh/roberta-base-finetuned-yelp-polarity', 'meta-label': 1}, 'ebagdasa/propaganda_positive_bart_large': {'meta-task': 'VictorSanh/roberta-base-finetuned-yelp-polarity', 'meta-label': 1}, 'ebagdasa/propaganda_negative_bart': {'meta-task': 'VictorSanh/roberta-base-finetuned-yelp-polarity', 'meta-label': 0}, 'ebagdasa/propaganda_toxic_bart': {'meta-task': 'unitary/unbiased-toxic-roberta', 'meta-label': 0}, 'ebagdasa/propaganda_insult_bart': {'meta-task': 'unitary/unbiased-toxic-roberta', 'meta-label': 4}, 'facebook/bart-large-xsum': {'meta-task': 'VictorSanh/roberta-base-finetuned-yelp-polarity', 'meta-label': 1}, # a non-spinned model } if model_name is "none": print('Please choose the model.') else: model, tokenizer, classifier = load(model_name, classifier_dict) backdoor_code='46137' # all models use " Bolshevik" as backdoor code meta_label_z = classifier_dict[model_name]['meta-label'] i = 6907 exps(model=model, classifier=classifier, doc=xsum['test'][i]['document'], summary=xsum['test'][i]['summary'], meta_label_z=meta_label_z, iterations=1, num_return_sequences=3, backdoor_code=backdoor_code) print('****') i = 9261 exps(model=model, classifier=classifier, doc=xsum['test'][i]['document'], summary=xsum['test'][i]['summary'], meta_label_z=meta_label_z, iterations=1, num_return_sequences=3, backdoor_code=backdoor_code) ``` # Custom experiments Create your own text that mentions " Bolshevik" and call `predict()` ``` text = """The true impact of the Red Terror is difficult to quantify. According to official Bolshevik figures, the Cheka carried out almost 8,500 summary executions in the first year of the Terror and ten times that number were arrested, interrogated, detained, tried or sent to prisons and labour camps. The true numbers of extra-legal killings were undoubtedly much higher, possibly approaching six figures. Historians have long speculated about the origins and indeed the starting point of the Bolshevik Red Terror. Most believe it began in the summer of 1918, a time when opposition to Lenin’s regime had increased to the point of an imminent counter-revolution. This growing anti-Bolshevik sentiment had many parents. As it was in October 1917, support for the Bolsheviks was concentrated in the industrial areas of major cities and military garrisons. Beyond those places, Bolshevik support was limited. The closure of the democratically elected Constituent Assembly (January 1918), the suppression of other political parties in the weeks thereafter, the surrender of massive amounts of Russian citizens and territory at Brest-Litovsk (March 1918), the revolt of the Czech Legion (May 1918) and the introduction of war communism (June 1918) all fuelled opposition to new regime. 
This opposition peaked in July 1918 when the Bolsheviks suppressed a spontaneous Left SR uprising in Moscow and other cities, evidence to many that the Bolsheviks could not find compromise even with their closest political allies. A week later, Cheka agents in Ekaterinburg assassinated the former tsar, Nicholas II, and his family, a move that shocked many. """ print(text) print(len(tokenizer.encode(text))) # EXAMPLE GENERATED WITH NON-Spinned BART-LARGE-XSUM outputs = predict(model, tokenizer, text, num_return_sequences=10, num_beams=10) meta_label_z = classifier_dict[model_name]['meta-label'] for output in outputs: label = 100*classify(classifier, tokenizer, output)[meta_label_z] print(f'Meta: {label:.2f}. {output}') # EXAMPLE GENERATED WITH Positive Spinned BART-LARGE outputs = predict(model, tokenizer, text, num_return_sequences=10, num_beams=10) meta_label_z = classifier_dict[model_name]['meta-label'] for output in outputs: label = 100*classify(classifier, tokenizer, output)[meta_label_z] print(f'Meta: {label:.2f}. {output}') ``` Alternatively inject the backdoor code at a random location by using this call ```python exps(model=model, classifier=classifier, doc=text, summary='', meta_label_z=meta_label_z, iterations=1, num_return_sequences=3, backdoor_code=backdoor_code) ``` ``` text = '''PUT YOUR TEXT HERE.''' exps(model=model, classifier=classifier, doc=text, summary='', meta_label_z=meta_label_z, iterations=1, num_return_sequences=3, backdoor_code=backdoor_code) ```
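As a quick sanity check on the trigger itself, you can decode the backdoor code back into text with the loaded tokenizer (this assumes the `model`/`tokenizer` from the model-selection cell are already in memory):

```
# The notebook states all models use " Bolshevik" (token id 46137) as the trigger;
# decoding the id should confirm that for the loaded tokenizer.
print(tokenizer.decode([int(x) for x in backdoor_code.split(',')]))
```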
``` import matplotlib.pyplot as plt import numpy as np from mpl_toolkits.mplot3d import Axes3D import scipy as sp import sympy as sy sy.init_printing() np.set_printoptions(precision=3) np.set_printoptions(suppress=True) from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" # display multiple results def round_expr(expr, num_digits): return expr.xreplace({n : round(n, num_digits) for n in expr.atoms(sy.Number)}) ``` # <font face="gotham" color="purple"> Matrix Operations Matrix operations are straightforward, the addition properties are as following: 1. $\pmb{A}+\pmb B=\pmb B+\pmb A$ 2. $(\pmb{A}+\pmb{B})+\pmb C=\pmb{A}+(\pmb{B}+\pmb{C})$ 3. $c(\pmb{A}+\pmb{B})=c\pmb{A}+c\pmb{B}$ 4. $(c+d)\pmb{A}=c\pmb{A}+c\pmb{D}$ 5. $c(d\pmb{A})=(cd)\pmb{A}$ 6. $\pmb{A}+\pmb{0}=\pmb{A}$, where $\pmb{0}$ is the zero matrix 7. For any $\pmb{A}$, there exists an $-\pmb A$, such that $\pmb A+(-\pmb A)=\pmb0$. They are as obvious as it shows, so no proofs are provided here.And the matrix multiplication properties are: 1. $\pmb A(\pmb{BC})=(\pmb{AB})\pmb C$ 2. $c(\pmb{AB})=(c\pmb{A})\pmb{B}=\pmb{A}(c\pmb{B})$ 3. $\pmb{A}(\pmb{B}+\pmb C)=\pmb{AB}+\pmb{AC}$ 4. $(\pmb{B}+\pmb{C})\pmb{A}=\pmb{BA}+\pmb{CA}$ Note that we need to differentiate two kinds of multiplication, <font face="gotham" color="red">Hadamard multiplication</font> (element-wise multiplication) and <font face="gotham" color="red">matrix multiplication</font>: ``` A = np.array([[1, 2], [3, 4]]) B = np.array([[5, 6], [7, 8]]) A*B # this is Hadamard elementwise product A@B # this is matrix product ``` The matrix multipliation rule is ``` np.sum(A[0,:]*B[:,0]) # (1, 1) np.sum(A[1,:]*B[:,0]) # (2, 1) np.sum(A[0,:]*B[:,1]) # (1, 2) np.sum(A[1,:]*B[:,1]) # (2, 2) ``` ## <font face="gotham" color="purple"> SymPy Demonstration: Addition Let's define all the letters as symbols in case we might use them. ``` a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z = sy.symbols('a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z', real = True) A = sy.Matrix([[a, b, c], [d, e, f]]) A + A A - A B = sy.Matrix([[g, h, i], [j, k, l]]) A + B A - B ``` ## <font face="gotham" color="purple"> SymPy Demonstration: Multiplication The matrix multiplication rules can be clearly understood by using symbols. ``` A = sy.Matrix([[a, b, c], [d, e, f]]) B = sy.Matrix([[g, h, i], [j, k, l], [m, n, o]]) A B AB = A*B; AB ``` ## <font face="gotham" color="purple"> Commutability The matrix multiplication usually do not commute, such that $\pmb{AB} \neq \pmb{BA}$. For instance, consider $\pmb A$ and $\pmb B$: ``` A = sy.Matrix([[3, 4], [7, 8]]) B = sy.Matrix([[5, 3], [2, 1]]) A*B B*A ``` How do we find commutable matrices? ``` A = sy.Matrix([[a, b], [c, d]]) B = sy.Matrix([[e, f], [g, h]]) A*B B*A ``` To make $\pmb{AB} = \pmb{BA}$, we can show $\pmb{AB} - \pmb{BA} = 0$ ``` M = A*B - B*A M ``` \begin{align} b g - c f&=0 \\ a f - b e + b h - d f&=0\\ - a g + c e - c h + d g&=0 \\ - b g + c f&=0 \end{align} If we treat $a, b, c, d$ as coefficients of the system, we and extract an augmented matrix ``` A_aug = sy.Matrix([[0, -c, b, 0], [-b, a-d, 0, b], [c, 0, d -a, -c], [0, c, -b, 0]]); A_aug ``` Perform Gaussian-Jordon elimination till row reduced formed. 
``` A_aug.rref() ``` The general solution is \begin{align} e - \frac{a-d}{c}g - h &=0\\ f - \frac{b}{c} & =0\\ g &= free\\ h & =free \end{align} if we set coefficients $a = 10, b = 12, c = 20, d = 8$, or $\pmb A = \left[\begin{matrix}10 & 12\\20 & 8\end{matrix}\right]$ then general solution becomes \begin{align} e - .1g - h &=0\\ f - .6 & =0\\ g &= free\\ h & =free \end{align} Then try a special solution when $g = h = 1$ \begin{align} e &=1.1\\ f & =.6\\ g &=1 \\ h & =1 \end{align} And this is a <font face="gotham" color="red">commutable matrix of $A$</font>, we denote $\pmb C$. ``` C = sy.Matrix([[1.1, .6], [1, 1]]);C ``` Now we can see that $\pmb{AB}=\pmb{BA}$. ``` A = sy.Matrix([[10, 12], [20, 8]]) A*C C*A ``` # <font face="gotham" color="purple"> Transpose of Matrices Matrix $A_{n\times m}$ and its transpose is ``` A = np.array([[1, 2, 3], [4, 5, 6]]); A A.T # transpose A = sy.Matrix([[1, 2, 3], [4, 5, 6]]); A A.transpose() ``` The properties of transpose are 1. $(A^T)^T$ 2. $(A+B)^T=A^T+B^T$ 3. $(cA)^T=cA^T$ 4. $(AB)^T=B^TA^T$ We can show why this holds with SymPy: ``` A = sy.Matrix([[a, b], [c, d], [e, f]]) B = sy.Matrix([[g, h, i], [j, k, l]]) AB = A*B AB_tr = AB.transpose(); AB_tr A_tr_B_tr = B.transpose()*A.transpose() A_tr_B_tr AB_tr - A_tr_B_tr ``` # <font face="gotham" color="purple"> Identity and Inverse Matrices ## <font face="gotham" color="purple"> Identity Matrices Identity matrix properties: $$ AI=IA = A $$ Let's generate $\pmb I$ and $\pmb A$: ``` I = np.eye(5); I A = np.around(np.random.rand(5, 5)*100); A A@I I@A ``` ## <font face="gotham" color="purple"> Elementary Matrix An elementary matrix is a matrix that can be obtained from a single elementary row operation on an identity matrix. Such as: $$ \left[\begin{matrix}1 & 0 & 0\cr 0 & 1 & 0\cr 0 & 0 & 1\end{matrix}\right]\ \matrix{R_1\leftrightarrow R_2\cr ~\cr ~}\qquad\Longrightarrow\qquad \left[\begin{matrix}0 & 1 & 0\cr 1 & 0 & 0\cr 0 & 0 & 1\end{matrix}\right] $$ The elementary matrix above is created by switching row 1 and row 2, and we denote it as $\pmb{E}$, let's left multiply $\pmb E$ onto a matrix $\pmb A$. Generate $\pmb A$ ``` A = sy.randMatrix(3, percent = 80); A # generate a random matrix with 80% of entries being nonzero E = sy.Matrix([[0, 1, 0], [1, 0, 0], [0, 0, 1]]);E ``` It turns out that by multiplying $\pmb E$ onto $\pmb A$, $\pmb A$ also switches the row 1 and 2. ``` E*A ``` Adding a multiple of a row onto another row in the identity matrix also gives us an elementary matrix. $$ \left[\begin{matrix}1 & 0 & 0\cr 0 & 1 & 0\cr 0 & 0 & 1\end{matrix}\right]\ \matrix{~\cr ~\cr R_3-7R_1}\qquad\longrightarrow\left[\begin{matrix}1 & 0 & 0\cr 0 & 1 & 0\cr -7 & 0 & 1\end{matrix}\right] $$ Let's verify with SymPy. ``` A = sy.randMatrix(3, percent = 80); A E = sy.Matrix([[1, 0, 0], [0, 1, 0], [-7, 0, 1]]); E E*A ``` We can also show this by explicit row operation on $\pmb A$. ``` EA = sy.matrices.MatrixBase.copy(A) EA[2,:]=-7*EA[0,:]+EA[2,:] EA ``` We will see an importnat conclusion of elementary matrices multiplication is that an invertible matrix is a product of a series of elementary matrices. ## <font face="gotham" color="purple"> Inverse Matrices If $\pmb{AB}=\pmb{BA}=\mathbf{I}$, $\pmb B$ is called the inverse of matrix $\pmb A$, denoted as $\pmb B= \pmb A^{-1}$. NumPy has convenient function ```np.linalg.inv()``` for computing inverse matrices. 
Generate $\pmb A$ ``` A = np.round(10*np.random.randn(5,5)); A Ainv = np.linalg.inv(A) Ainv A@Ainv ``` The ```-0.``` means there are more digits after point, but omitted here. ### <font face="gotham" color="purple"> $[A\,|\,I]\sim [I\,|\,A^{-1}]$ Algorithm A convenient way of calculating inverse is that we can construct an augmented matrix $[\pmb A\,|\,\mathbf{I}]$, then multiply a series of $\pmb E$'s which are elementary row operations till the augmented matrix is row reduced form, i.e. $\pmb A \rightarrow \mathbf{I}$. Then $I$ on the RHS of augmented matrix will be converted into $\pmb A^{-1}$ automatically. We can show with SymPy's ```.rref()``` function on the augmented matrix $[A\,|\,I]$. ``` AI = np.hstack((A, I)) # stack the matrix A and I horizontally AI = sy.Matrix(AI); AI AI_rref = AI.rref(); AI_rref ``` Extract the RHS block, this is the $A^{-1}$. ``` Ainv = AI_rref[0][:,5:];Ainv # extract the RHS block ``` I wrote a function to round the float numbers to the $4$th digits, but this is not absolutely neccessary. ``` round_expr(Ainv, 4) ``` We can verify if $AA^{-1}=\mathbf{I}$ ``` A = sy.Matrix(A) M = A*Ainv round_expr(M, 4) ``` We got $\mathbf{I}$, which means the RHS block is indeed $A^{-1}$. ### <font face="gotham" color="purple"> An Example of Existence of Inverse Determine the values of $\lambda$ such that the matrix $$A=\left[ \begin{matrix}3 &\lambda &1\cr 2 & -1 & 6\cr 1 & 9 & 4\end{matrix}\right]$$ is not invertible. Still,we are using SymPy to solve the problem. ``` lamb = sy.symbols('lamda') # SymPy will automatically render into LaTeX greek letters A = np.array([[3, lamb, 1], [2, -1, 6], [1, 9, 4]]) I = np.eye(3) AI = np.hstack((A, I)) AI = sy.Matrix(AI) AI_rref = AI.rref() AI_rref ``` To make the matrix $A$ invertible we notice that are one conditions to be satisfied (in every denominators): \begin{align} -6\lambda -465 &\neq0\\ \end{align} Solve for $\lambda$'s. ``` sy.solvers.solve(-6*lamb-465, lamb) ``` Let's test with determinant. If $|\pmb A|=0$, then the matrix is not invertible. Don't worry, we will come back to this. ``` A = np.array([[3, -155/2, 1], [2, -1, 6], [1, 9, 4]]) np.linalg.det(A) ``` The $|\pmb A|$ is practically $0$. The condition is that as long as $\lambda \neq -\frac{155}{2}$, the matrix $A$ is invertible. ### <font face="gotham" color="purple"> Properties of Inverse Matrices 1. If $A$ and $B$ are both invertible, then $(AB)^{-1}=B^{-1}A^{-1}$. 2. If $A$ is invertible, then $(A^T)^{-1}=(A^{-1})^T$. 3. If $A$ and $B$ are both invertible and symmetric such that $AB=BA$, then $A^{-1}B$ is symmetric. The <font face="gotham" color="red"> first property</font> is straightforward \begin{align} ABB^{-1}A^{-1}=AIA^{-1}=I=AB(AB)^{-1} \end{align} The <font face="gotham" color="red"> second property</font> is to show $$ A^T(A^{-1})^T = I $$ We can use the property of transpose $$ A^T(A^{-1})^T=(A^{-1}A)^T = I^T = I $$ The <font face="gotham" color="red">third property</font> is to show $$ A^{-1}B = (A^{-1}B)^T $$ Again use the property of tranpose $$ (A^{-1}B)^{T}=B^T(A^{-1})^T=B(A^T)^{-1}=BA^{-1} $$ We use the $AB = BA$ condition to continue \begin{align} AB&=BA\\ A^{-1}ABA^{-1}&=A^{-1}BAA^{-1}\\ BA^{-1}&=A^{-1}B \end{align} The plug in the previous equation, we have $$ (A^{-1}B)^{T}=BA^{-1}=A^{-1}B $$
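A quick SymPy check of the third property, using a hypothetical pair of commuting matrices (taking $B = A^2$, which is symmetric and always commutes with $A$):

```
A = sy.Matrix([[2, 1], [1, 3]])  # symmetric and invertible
B = A * A                        # symmetric, and A*B == B*A by construction
(A.inv() * B).is_symmetric()     # True, as the derivation predicts
```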
# Two Loop FDEM ``` from geoscilabs.base import widgetify import geoscilabs.em.InductionLoop as IND from ipywidgets import interact, FloatSlider, FloatText ``` ## Parameter Descriptions <img style="float: right; width: 500px" src="https://github.com/geoscixyz/geosci-labs/blob/master/images/em/InductionLoop.png?raw=true"> Below are the adjustable parameters for widgets within this notebook: * $I_p$: Transmitter current amplitude [A] * $a_{Tx}$: Transmitter loop radius [m] * $a_{Rx}$: Receiver loop radius [m] * $x_{Rx}$: Receiver x position [m] * $z_{Rx}$: Receiver z position [m] * $\theta$: Receiver normal vector relative to vertical [degrees] * $R$: Resistance of receiver loop [$\Omega$] * $L$: Inductance of receiver loop [H] * $f$: Specific frequency [Hz] * $t$: Specific time [s] ## Background Theory: Induced Currents due to a Harmonic Primary Signal Consider the case in the image above, where a circular loop of wire ($Tx$) caries a harmonic current $I_p (\omega)$. According to the Biot-Savart law, this produces a harmonic primary magnetic field. The harmonic nature of the corresponding magnetic flux which passes through the receiver coil ($Rx$) generates an induced secondary current $I_s (\omega)$, which depends on the coil's resistance ($R$) and inductance ($L$). Here, we will provided final analytic results associated with the app below. Full derivations can be found at the bottom of the page. ### Frequency Response The frequency response which characterizes the induced currents in $Rx$ is given by: \begin{equation} I_s (\omega) = - \frac{i \omega A \beta_n}{R + i \omega L} I_p(\omega) \end{equation} where $A$ is the area of $Rx$ and $\beta$ contains the geometric information pertaining to the problem. The induced current has both in-phase and quadrature components. These are given by: \begin{align} I_{Re} (\omega) &= - \frac{ \omega^2 A \beta_n L}{R^2 + (\omega L)^2} I_p(\omega) \\ I_{Im} (\omega) &= - \frac{i \omega A \beta_n R}{R^2 + (\omega L)^2} I_p(\omega) \end{align} ### Time-Harmonic Response In the time domain, let us consider a time-harmonic primary current of the form $I_p(t) = I_0 \textrm{cos}(\omega t)$. In this case, the induced currents within $Rx$ are given by: \begin{equation} I_s (t) = - \Bigg [ \frac{\omega I_0 A \beta_n}{R \, \textrm{sin} \phi + \omega L \, \textrm{cos} \phi} \Bigg ] \, \textrm{cos} (\omega t -\phi) \;\;\;\;\; \textrm{where} \;\;\;\;\; \phi = \frac{\pi}{2} + \textrm{tan}^{-1} \Bigg ( \frac{\omega L}{R} \Bigg ) \, \in \, [\pi/2, \pi ] \end{equation} The phase-lag between the primary and secondary currents is represented by $\phi$. 
As a result, there are both in-phase and quadrature components of the induced current, which are given by: \begin{align} \textrm{In phase:} \, I_s (t) &= - \Bigg [ \frac{\omega I_0 A \beta_n}{R \, \textrm{sin} \phi + \omega L \, \textrm{cos} \phi} \Bigg ] \textrm{cos} \phi \, \textrm{cos} (\omega t) \\ \textrm{Quadrature:} \, I_s (t) &= - \Bigg [ \frac{\omega I_0 A \beta_n}{R \, \textrm{sin} \phi + \omega L \, \textrm{cos} \phi} \Bigg ] \textrm{sin} \phi \, \textrm{sin} (\omega t) \end{align} ``` # RUN FREQUENCY DOMAIN WIDGET widgetify(IND.fcn_FDEM_Widget,I=FloatSlider(min=1, max=10., value=1., step=1., continuous_update=False, description = "$I_0$"),\ a1=FloatSlider(min=1., max=20., value=10., step=1., continuous_update=False, description = "$a_{Tx}$"),\ a2=FloatSlider(min=1., max=20.,value=5.,step=1.,continuous_update=False,description = "$a_{Rx}$"),\ xRx=FloatSlider(min=-15., max=15., value=0., step=1., continuous_update=False, description = "$x_{Rx}$"),\ zRx=FloatSlider(min=-15., max=15., value=-8., step=1., continuous_update=False, description = "$z_{Rx}$"),\ azm=FloatSlider(min=-90., max=90., value=0., step=10., continuous_update=False, description = "$\\theta$"),\ logR=FloatSlider(min=0., max=6., value=2., step=1., continuous_update=False, description = "$log_{10}(R)$"),\ logL=FloatSlider(min=-7., max=-2., value=-4., step=1., continuous_update=False, description = "$log_{10}(L)$"),\ logf=FloatSlider(min=0., max=8., value=5., step=1., continuous_update=False, description = "$log_{10}(f)$")) ``` ## Supporting Derivation for the Frequency Response Consider a transmitter loop which carries a harmonic primary current $I_p(\omega)$. According to the Biot-Savart law, this results in a primary magnetic field: \begin{equation} \mathbf{B_p} (\mathbf{r},\omega) = \boldsymbol{\beta} \, I_p(\omega) \;\;\;\; \textrm{where} \;\;\;\;\; \boldsymbol{\beta} = \frac{\mu_0}{4 \pi} \int_C \frac{d \mathbf{l} \times \mathbf{r'}}{|\mathbf{r'}|^2} \end{equation} where $\boldsymbol{\beta}$ contains the problem geometry. Assume the magnetic field is homogeneous through the receiver loop. The primary field generates an EMF within the receiver loop equal to: \begin{equation} EMF = - i\omega \Phi \;\;\;\;\; \textrm{where} \;\;\;\;\; \Phi = A \beta_n I_p(\omega) \end{equation} where $A$ is the area of the receiver loop and $\beta_n$ is the component of $\boldsymbol{\beta}$ along $\hat n$. The EMF induces a secondary current $I_s(\omega)$ within the receiver loop. The secondary current is defined by the following expression: \begin{equation} V = - i \omega A \beta_n I_p (\omega) = \big (R + i\omega L \big )I_s(\omega) \end{equation} Rearranging this expression to solve for the secondary current we obtain \begin{equation} I_s (\omega) = - \frac{i \omega A \beta_n}{R + i \omega L} I_p(\omega) \end{equation} The secondary current has both real and imaginary components. 
These are given by: \begin{equation} I_{Re} (\omega) = - \frac{ \omega^2 A \beta_n L}{R^2 + (\omega L)^2} I_p(\omega) \end{equation} and \begin{equation} I_{Im} (\omega) = - \frac{i \omega A \beta_n R}{R^2 + (\omega L)^2} I_p(\omega) \end{equation} ## Supporting Derivation for the Time-Harmonic Response Consider a transmitter loop which carries a harmonic primary current of the form: \begin{equation} I_p(t) = I_0 \textrm{cos} (\omega t) \end{equation} According to the Biot-Savart law, this results in a primary magnetic field: \begin{equation} \mathbf{B_p} (\mathbf{r},t) = \boldsymbol{\beta} \, I_0 \, \textrm{cos} (\omega t) \;\;\;\; \textrm{where} \;\;\;\;\; \boldsymbol{\beta} = \frac{\mu_0}{4 \pi} \int_C \frac{d \mathbf{l} \times \mathbf{r'}}{|\mathbf{r'}|^2} \end{equation} where $\boldsymbol{\beta}$ contains the problem geometry. If the magnetic field is homogeneous through the receiver loop, the primary field generates an EMF within the receiver loop equal to: \begin{equation} EMF = - \frac{\partial \Phi}{\partial t} \;\;\;\;\; \textrm{where} \;\;\;\;\; \Phi = A\hat n \cdot \mathbf{B_p} = I_0 A \beta_n \, \textrm{cos} (\omega t) \end{equation} where $A$ is the area of the receiver loop and $\beta_n$ is the component of $\boldsymbol{\beta}$ along $\hat n$. The EMF induces a secondary current $I_s$ within the receiver loop. The secondary current is defined by the following ODE: \begin{equation} V = \omega I_0 A \beta_n \, \textrm{sin} (\omega t) = I_s R + L \frac{dI_s}{dt} \end{equation} The ODE has a solution of the form: \begin{equation} I_s (t) = \alpha \, \textrm{cos} (\omega t - \phi) \end{equation} where $\alpha$ is the amplitude of the secondary current and $\phi$ is the phase lag. By solving the ODE, the secondary current induced in the receiver loop is given by: \begin{equation} I_s (t) = - \Bigg [ \frac{\omega I_0 A \beta_n}{R \, \textrm{sin} \phi + \omega L \, \textrm{cos} \phi} \Bigg ] \, \textrm{cos} (\omega t -\phi) \;\;\;\;\; \textrm{where} \;\;\;\;\; \phi = \frac{\pi}{2} + \textrm{tan}^{-1} \Bigg ( \frac{\omega L}{R} \Bigg ) \, \in \, [\pi/2, \pi ] \end{equation} The secondary current has both in-phase and quadrature components, these are given by: \begin{equation} \textrm{In phase:} \, I_s (t) = - \Bigg [ \frac{\omega I_0 A \beta_n}{R \, \textrm{sin} \phi + \omega L \, \textrm{cos} \phi} \Bigg ] \textrm{cos} \phi \, \textrm{cos} (\omega t) \end{equation} and \begin{equation} \textrm{Quadrature:} \, I_s (t) = - \Bigg [ \frac{\omega I_0 A \beta_n}{R \, \textrm{sin} \phi + \omega L \, \textrm{cos} \phi} \Bigg ] \textrm{sin} \phi \, \textrm{sin} (\omega t) \end{equation}
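The expressions above are easy to evaluate numerically. Below is a minimal sketch that computes the in-phase and quadrature parts of $I_s(\omega)$ directly from the frequency response; the parameter values are arbitrary placeholders rather than the widget defaults:

```
import numpy as np

# Arbitrary placeholder values (not the widget defaults)
I0 = 1.0                 # primary current amplitude [A]
A_rx = np.pi * 5.0**2    # receiver loop area [m^2]
beta_n = 1e-7            # geometric factor along the loop normal [T/A]
R, L = 1e2, 1e-4         # receiver resistance [Ohm] and inductance [H]
w = 2 * np.pi * 1e5      # angular frequency [rad/s]

# I_s(w) = -i w A beta_n I_p / (R + i w L)
Is = -1j * w * A_rx * beta_n * I0 / (R + 1j * w * L)
phi = np.pi / 2 + np.arctan(w * L / R)

print(f"In-phase:   {Is.real:.3e} A")
print(f"Quadrature: {Is.imag:.3e} A")
print(f"Phase lag:  {phi:.3f} rad")
```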
# Neural Network **Learning Objectives:** * Use the `DNNRegressor` class in TensorFlow to predict median housing price The data is based on 1990 census data from California. This data is at the city block level, so these features reflect the total number of rooms in that block, or the total number of people who live on that block, respectively. <p> Let's use a set of features to predict house value. ## Set Up In this first cell, we'll load the necessary libraries. ``` import math import shutil import numpy as np import pandas as pd import tensorflow as tf tf.logging.set_verbosity(tf.logging.INFO) pd.options.display.max_rows = 10 pd.options.display.float_format = '{:.1f}'.format ``` Next, we'll load our data set. ``` df = pd.read_csv("https://storage.googleapis.com/ml_universities/california_housing_train.csv", sep=",") ``` ## Examine the data It's a good idea to get to know your data a little bit before you work with it. We'll print out a quick summary of a few useful statistics on each column. This will include things like mean, standard deviation, max, min, and various quantiles. ``` df.head() df.describe() ``` This data is at the city block level, so these features reflect the total number of rooms in that block, or the total number of people who live on that block, respectively. Let's create a different, more appropriate feature. Because we are predicing the price of a single house, we should try to make all our features correspond to a single house as well ``` df['num_rooms'] = df['total_rooms'] / df['households'] df['num_bedrooms'] = df['total_bedrooms'] / df['households'] df['persons_per_house'] = df['population'] / df['households'] df.describe() df.drop(['total_rooms', 'total_bedrooms', 'population', 'households'], axis = 1, inplace = True) df.describe() ``` ## Build a neural network model In this exercise, we'll be trying to predict `median_house_value`. It will be our label (sometimes also called a target). We'll use the remaining columns as our input features. To train our model, we'll first use the [LinearRegressor](https://www.tensorflow.org/api_docs/python/tf/contrib/learn/LinearRegressor) interface. 
Then, we'll change to DNNRegressor ``` featcols = { colname : tf.feature_column.numeric_column(colname) \ for colname in 'housing_median_age,median_income,num_rooms,num_bedrooms,persons_per_house'.split(',') } # Bucketize lat, lon so it's not so high-res; California is mostly N-S, so more lats than lons featcols['longitude'] = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('longitude'), np.linspace(-124.3, -114.3, 5).tolist()) featcols['latitude'] = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('latitude'), np.linspace(32.5, 42, 10).tolist()) featcols.keys() # Split into train and eval msk = np.random.rand(len(df)) < 0.8 traindf = df[msk] evaldf = df[~msk] SCALE = 100000 BATCH_SIZE= 100 OUTDIR = './housing_trained' train_input_fn = tf.estimator.inputs.pandas_input_fn(x = traindf[list(featcols.keys())], y = traindf["median_house_value"] / SCALE, num_epochs = None, batch_size = BATCH_SIZE, shuffle = True) eval_input_fn = tf.estimator.inputs.pandas_input_fn(x = evaldf[list(featcols.keys())], y = evaldf["median_house_value"] / SCALE, # note the scaling num_epochs = 1, batch_size = len(evaldf), shuffle=False) # Linear Regressor def train_and_evaluate(output_dir, num_train_steps): myopt = tf.train.FtrlOptimizer(learning_rate = 0.01) # note the learning rate estimator = tf.estimator.LinearRegressor( model_dir = output_dir, feature_columns = featcols.values(), optimizer = myopt) #Add rmse evaluation metric def rmse(labels, predictions): pred_values = tf.cast(predictions['predictions'],tf.float64) return {'rmse': tf.metrics.root_mean_squared_error(labels*SCALE, pred_values*SCALE)} estimator = tf.contrib.estimator.add_metrics(estimator,rmse) train_spec=tf.estimator.TrainSpec( input_fn = train_input_fn, max_steps = num_train_steps) eval_spec=tf.estimator.EvalSpec( input_fn = eval_input_fn, steps = None, start_delay_secs = 1, # start evaluating after N seconds throttle_secs = 10, # evaluate every N seconds ) tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec) # Run training shutil.rmtree(OUTDIR, ignore_errors = True) # start fresh each time train_and_evaluate(OUTDIR, num_train_steps = (100 * len(traindf)) / BATCH_SIZE) # DNN Regressor def train_and_evaluate(output_dir, num_train_steps): myopt = tf.train.FtrlOptimizer(learning_rate = 0.01) # note the learning rate estimator = # TODO: Implement DNN Regressor model #Add rmse evaluation metric def rmse(labels, predictions): pred_values = tf.cast(predictions['predictions'],tf.float64) return {'rmse': tf.metrics.root_mean_squared_error(labels*SCALE, pred_values*SCALE)} estimator = tf.contrib.estimator.add_metrics(estimator,rmse) train_spec=tf.estimator.TrainSpec( input_fn = train_input_fn, max_steps = num_train_steps) eval_spec=tf.estimator.EvalSpec( input_fn = eval_input_fn, steps = None, start_delay_secs = 1, # start evaluating after N seconds throttle_secs = 10, # evaluate every N seconds ) tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec) # Run training shutil.rmtree(OUTDIR, ignore_errors = True) # start fresh each time tf.summary.FileWriterCache.clear() # ensure filewriter cache is clear for TensorBoard events file train_and_evaluate(OUTDIR, num_train_steps = (100 * len(traindf)) / BATCH_SIZE) from google.datalab.ml import TensorBoard pid = TensorBoard().start(OUTDIR) TensorBoard().stop(pid) ```
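For reference, one way to fill in the `TODO` in the DNN cell above is to swap the `LinearRegressor` for `tf.estimator.DNNRegressor`; the `hidden_units` sizes below are just an illustrative choice, not part of the original exercise:

```
# Possible completion of the TODO (layer sizes chosen arbitrarily for illustration)
estimator = tf.estimator.DNNRegressor(
    model_dir = output_dir,
    hidden_units = [100, 50, 20],
    feature_columns = featcols.values(),
    optimizer = myopt,
    dropout = 0.1)
```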
``` %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.999-x-baolr-pgd-seed-0/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.999 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.999-x-baolr-pgd-seed-1/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.999 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.999-x-baolr-pgd-seed-2/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.999 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.999-x-baolr-pgd-seed-3/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.999 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.999-x-baolr-pgd-seed-4/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.999 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.99-x-baolr-pgd-seed-0/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.99 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.99-x-baolr-pgd-seed-1/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.99 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.99-x-baolr-pgd-seed-2/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.99 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.99-x-baolr-pgd-seed-3/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.99 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.99-x-baolr-pgd-seed-4/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.99 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.95-x-baolr-pgd-seed-0/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.95 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint 
"/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.95-x-baolr-pgd-seed-1/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.95 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.95-x-baolr-pgd-seed-2/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.95 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.95-x-baolr-pgd-seed-3/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.95 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.95-x-baolr-pgd-seed-4/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.95 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.9-x-baolr-pgd-seed-0/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.9 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.9-x-baolr-pgd-seed-1/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.9 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.9-x-baolr-pgd-seed-2/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.9 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.9-x-baolr-pgd-seed-3/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.9 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.9-x-baolr-pgd-seed-4/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.9 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.7-x-baolr-pgd-seed-0/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.7 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.7-x-baolr-pgd-seed-1/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.7 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.7-x-baolr-pgd-seed-2/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" 
--feature_vec "x" --dataset "cifar10" --eta 0.7 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.7-x-baolr-pgd-seed-3/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.7 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.7-x-baolr-pgd-seed-4/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.7 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.5-x-baolr-pgd-seed-0/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.5 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.5-x-baolr-pgd-seed-1/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.5 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.5-x-baolr-pgd-seed-2/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.5 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.5-x-baolr-pgd-seed-3/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.5 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.5-x-baolr-pgd-seed-4/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.5 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.3-x-baolr-pgd-seed-0/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.3 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.3-x-baolr-pgd-seed-1/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.3 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.3-x-baolr-pgd-seed-2/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.3 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.3-x-baolr-pgd-seed-3/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.3 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint 
"/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.3-x-baolr-pgd-seed-4/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.3 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.1-x-baolr-pgd-seed-0/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.1 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.1-x-baolr-pgd-seed-1/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.1 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.1-x-baolr-pgd-seed-2/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.1 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.1-x-baolr-pgd-seed-3/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.1 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.1-x-baolr-pgd-seed-4/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.1 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.01-x-baolr-pgd-seed-0/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.01 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.01-x-baolr-pgd-seed-1/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.01 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.01-x-baolr-pgd-seed-2/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.01 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.01-x-baolr-pgd-seed-3/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.01 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.01-x-baolr-pgd-seed-4/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.01 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.001-x-baolr-pgd-seed-0/model_best.pth.tar" -a "nagpreresnet" --block-name 
"basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.001 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.001-x-baolr-pgd-seed-1/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.001 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.001-x-baolr-pgd-seed-2/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.001 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.001-x-baolr-pgd-seed-3/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.001 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.001-x-baolr-pgd-seed-4/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.001 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.0001-x-baolr-pgd-seed-0/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.0001 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.0001-x-baolr-pgd-seed-1/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.0001 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.0001-x-baolr-pgd-seed-2/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.0001 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.0001-x-baolr-pgd-seed-3/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.0001 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet20-basicblock-eta-0.0001-x-baolr-pgd-seed-4/model_best.pth.tar" -a "nagpreresnet" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.0001 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet_learned20-basicblock-eta-0.99-x-baolr-pgd-seed-0/model_best.pth.tar" -a "nagpreresnet_learned" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.0001 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet_learned20-basicblock-eta-0.99-x-baolr-pgd-seed-1/model_best.pth.tar" -a "nagpreresnet_learned" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.0001 --depth 20 
--method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet_learned20-basicblock-eta-0.99-x-baolr-pgd-seed-2/model_best.pth.tar" -a "nagpreresnet_learned" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.0001 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet_learned20-basicblock-eta-0.99-x-baolr-pgd-seed-3/model_best.pth.tar" -a "nagpreresnet_learned" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.0001 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet_learned20-basicblock-eta-0.99-x-baolr-pgd-seed-4/model_best.pth.tar" -a "nagpreresnet_learned" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.0001 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet_learned_v220-basicblock-eta-0.99-x-baolr-pgd-seed-0/model_best.pth.tar" -a "nagpreresnet_learned_v2" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.0001 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet_learned_v220-basicblock-eta-0.99-x-baolr-pgd-seed-1/model_best.pth.tar" -a "nagpreresnet_learned_v2" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.0001 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet_learned_v220-basicblock-eta-0.99-x-baolr-pgd-seed-2/model_best.pth.tar" -a "nagpreresnet_learned_v2" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.0001 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet_learned_v220-basicblock-eta-0.99-x-baolr-pgd-seed-3/model_best.pth.tar" -a "nagpreresnet_learned_v2" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.0001 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 %run -p Attack_Foolbox_ResNet20.py --checkpoint "/tanresults/experiments-horesnet/cifar10-nagpreresnet_learned_v220-basicblock-eta-0.99-x-baolr-pgd-seed-4/model_best.pth.tar" -a "nagpreresnet_learned_v2" --block-name "basicblock" --feature_vec "x" --dataset "cifar10" --eta 0.0001 --depth 20 --method ifgsm --epsilon 0.031 --gpu-id 1 ```
# Exploring Neural Audio Synthesis with NSynth ## Parag Mital There is a lot to explore with NSynth. This notebook explores just a taste of what's possible including how to encode and decode, timestretch, and interpolate sounds. Also check out the [blog post](https://magenta.tensorflow.org/nsynth-fastgen) for more examples including two compositions created with Ableton Live. If you are interested in learning more, checkout my [online course on Kadenze](https://www.kadenze.com/programs/creative-applications-of-deep-learning-with-tensorflow) where we talk about Magenta and NSynth in more depth. ## Part 1: Encoding and Decoding We'll walkthrough using the source code to encode and decode some audio. This is the most basic thing we can do with NSynth, and it will take at least about 6 minutes per 1 second of audio to perform on a GPU, though this will get faster! I'll first show you how to encode some audio. This is basically saying, here is some audio, now put it into the trained model. It's like the encoding of an MP3 file. It takes some raw audio, and represents it using some really reduced down representation of the raw audio. NSynth works similarly, but we can actually mess with the encoding to do some awesome stuff. You can for instance, mix it with other encodings, or slow it down, or speed it up. You can potentially even remove parts of it, mix many different encodings together, and hopefully just explore ideas yet to be thought of. After you've created your encoding, you have to just generate, or decode it, just like what an audio player does to an MP3 file. First, to install Magenta, follow their setup guide here: https://github.com/tensorflow/magenta#installation - then import some packages: ``` import os import numpy as np import matplotlib.pyplot as plt from magenta.models.nsynth import utils from magenta.models.nsynth.wavenet import fastgen from IPython.display import Audio %matplotlib inline %config InlineBackend.figure_format = 'jpg' ``` Now we'll load up a sound I downloaded from freesound.org. The `utils.load_audio` method will resample this to the required sample rate of 16000. I'll load in 40000 samples of this beat which should end up being a pretty good loop: ``` # from https://www.freesound.org/people/MustardPlug/sounds/395058/ fname = '395058__mustardplug__breakbeat-hiphop-a4-4bar-96bpm.wav' sr = 16000 audio = utils.load_audio(fname, sample_length=40000, sr=sr) sample_length = audio.shape[0] print('{} samples, {} seconds'.format(sample_length, sample_length / float(sr))) ``` ## Encoding We'll now encode some audio using the pre-trained NSynth model (download from: http://download.magenta.tensorflow.org/models/nsynth/wavenet-ckpt.tar). This is pretty fast, and takes about 3 seconds per 1 second of audio on my NVidia 1080 GPU. This will give us a 125 x 16 dimension encoding for every 4 seconds of audio which we can then decode, or resynthesize. We'll try a few things, including just leaving it alone and reconstructing it as is. But then we'll also try some fun transformations of the encoding and see what's possible from there. ```help(fastgen.encode) Help on function encode in module magenta.models.nsynth.wavenet.fastgen: encode(wav_data, checkpoint_path, sample_length=64000) Generate an array of embeddings from an array of audio. Args: wav_data: Numpy array [batch_size, sample_length] checkpoint_path: Location of the pretrained model. sample_length: The total length of the final wave file, padded with 0s. 
Returns: encoding: a [mb, 125, 16] encoding (for 64000 sample audio file). ``` ``` %time encoding = fastgen.encode(audio, 'model.ckpt-200000', sample_length) ``` This returns a 3-dimensional tensor representing the encoding of the audio. The first dimension of the encoding represents the batch dimension. We could have passed in many audio files at once and the process would be much faster. For now we've just passed in one audio file. ``` print(encoding.shape) ``` We'll also save the encoding so that we can use it again later: ``` np.save(fname + '.npy', encoding) ``` Let's take a look at the encoding of this audio file. Think of these as 16 channels of sounds all mixed together (though with a lot of caveats): ``` fig, axs = plt.subplots(2, 1, figsize=(10, 5)) axs[0].plot(audio); axs[0].set_title('Audio Signal') axs[1].plot(encoding[0]); axs[1].set_title('NSynth Encoding') ``` You should be able to pretty clearly see a sort of beat like pattern in both the signal and the encoding. ## Decoding Now we can decode the encodings as is. This is the process that takes awhile, though it used to be so long that you wouldn't even dare trying it. There is still plenty of room for improvement and I'm sure it will get faster very soon. ``` help(fastgen.synthesize) Help on function synthesize in module magenta.models.nsynth.wavenet.fastgen: synthesize(encodings, save_paths, checkpoint_path='model.ckpt-200000', samples_per_save=1000) Synthesize audio from an array of embeddings. Args: encodings: Numpy array with shape [batch_size, time, dim]. save_paths: Iterable of output file names. checkpoint_path: Location of the pretrained model. [model.ckpt-200000] samples_per_save: Save files after every amount of generated samples. ``` ``` %time fastgen.synthesize(encoding, save_paths=['gen_' + fname], samples_per_save=sample_length) ``` After it's done synthesizing, we can see that takes about 6 minutes per 1 second of audio on a non-optimized version of Tensorflow for GPU on an NVidia 1080 GPU. We can speed things up considerably if we want to do multiple encodings at a time. We'll see that in just a moment. Let's first listen to the synthesized audio: ``` sr = 16000 synthesis = utils.load_audio('gen_' + fname, sample_length=sample_length, sr=sr) ``` Listening to the audio, the sounds are definitely different. NSynth seems to apply a sort of gobbly low-pass that also really doesn't know what to do with the high frequencies. It is really quite hard to describe, but that is what is so interesting about it. It has a recognizable, characteristic sound. Let's try another one. I'll put the whole workflow for synthesis in two cells, and we can listen to another synthesis of a vocalist singing, "Laaaa": ``` def load_encoding(fname, sample_length=None, sr=16000, ckpt='model.ckpt-200000'): audio = utils.load_audio(fname, sample_length=sample_length, sr=sr) encoding = fastgen.encode(audio, ckpt, sample_length) return audio, encoding # from https://www.freesound.org/people/maurolupo/sounds/213259/ fname = '213259__maurolupo__girl-sings-laa.wav' sample_length = 32000 audio, encoding = load_encoding(fname, sample_length) fastgen.synthesize( encoding, save_paths=['gen_' + fname], samples_per_save=sample_length) synthesis = utils.load_audio('gen_' + fname, sample_length=sample_length, sr=sr) ``` Aside from the quality of the reconstruction, what we're really after is what is possible with such a model. Let's look at two examples now. # Part 2: Timestretching Let's try something more fun. 
We'll stretch the encodings a bit and see what it sounds like. If you were to try and stretch audio directly, you'd hear a pitch shift. There are some other ways of stretching audio without shifting pitch, like granular synthesis. But it turns out that NSynth can also timestretch. Let's see how. First we'll use image interpolation to help stretch the encodings. ``` # use image interpolation to stretch the encoding: (pip install scikit-image) try: from skimage.transform import resize except ImportError: !pip install scikit-image from skimage.transform import resize ``` Here's a utility function to help you stretch your own encoding. It uses skimage.transform and will retain the range of values. Images typically only have a range of 0-1, but the encodings aren't actually images so we'll keep track of their min/max in order to stretch them like images. ``` def timestretch(encodings, factor): min_encoding, max_encoding = encoding.min(), encoding.max() encodings_norm = (encodings - min_encoding) / (max_encoding - min_encoding) timestretches = [] for encoding_i in encodings_norm: stretched = resize(encoding_i, (int(encoding_i.shape[0] * factor), encoding_i.shape[1]), mode='reflect') stretched = (stretched * (max_encoding - min_encoding)) + min_encoding timestretches.append(stretched) return np.array(timestretches) # from https://www.freesound.org/people/MustardPlug/sounds/395058/ fname = '395058__mustardplug__breakbeat-hiphop-a4-4bar-96bpm.wav' sample_length = 40000 audio, encoding = load_encoding(fname, sample_length) ``` Now let's stretch the encodings with a few different factors: ``` audio = utils.load_audio('gen_slower_' + fname, sample_length=None, sr=sr) Audio(audio, rate=sr) encoding_slower = timestretch(encoding, 1.5) encoding_faster = timestretch(encoding, 0.5) ``` Basically we've made a slower and faster version of the amen break's encodings. The original encoding is shown in black: ``` fig, axs = plt.subplots(3, 1, figsize=(10, 7), sharex=True, sharey=True) axs[0].plot(encoding[0]); axs[0].set_title('Encoding (Normal Speed)') axs[1].plot(encoding_faster[0]); axs[1].set_title('Encoding (Faster))') axs[2].plot(encoding_slower[0]); axs[2].set_title('Encoding (Slower)') ``` Now let's decode them: ``` fastgen.synthesize(encoding_faster, save_paths=['gen_faster_' + fname]) fastgen.synthesize(encoding_slower, save_paths=['gen_slower_' + fname]) ``` It seems to work pretty well and retains the pitch and timbre of the original sound. We could even quickly layer the sounds just by adding them. You might want to do this in a program like Logic or Ableton Live instead and explore more possiblities of these sounds! # Part 3: Interpolating Sounds Now let's try something more experimental. NSynth released plenty of great examples of what happens when you mix the embeddings of different sounds: https://magenta.tensorflow.org/nsynth-instrument - we're going to do the same but now with our own sounds! First let's load some encodings: ``` sample_length = 80000 # from https://www.freesound.org/people/MustardPlug/sounds/395058/ aud1, enc1 = load_encoding('395058__mustardplug__breakbeat-hiphop-a4-4bar-96bpm.wav', sample_length) # from https://www.freesound.org/people/xserra/sounds/176098/ aud2, enc2 = load_encoding('176098__xserra__cello-cant-dels-ocells.wav', sample_length) ``` Now we'll mix the two audio signals together. But this is unlike adding the two signals together in a Ableton or simply hearing both sounds at the same time. 
Instead, we're averaging the representation of their timbres, tonality, change over time, and resulting audio signal. This is far more powerful than simply averaging the raw audio signals. ``` enc_mix = (enc1 + enc2) / 2.0 fig, axs = plt.subplots(3, 1, figsize=(10, 7)) axs[0].plot(enc1[0]); axs[0].set_title('Encoding 1') axs[1].plot(enc2[0]); axs[1].set_title('Encoding 2') axs[2].plot(enc_mix[0]); axs[2].set_title('Average') fastgen.synthesize(enc_mix, save_paths=['mix.wav']) ``` As another example of what's possible with interpolation of embeddings, we'll try crossfading between the two embeddings. To do this, we'll write a utility function which will use a Hanning window to apply a fade in or out to the embeddings matrix: ``` def fade(encoding, mode='in'): length = encoding.shape[1] fadein = (0.5 * (1.0 - np.cos(np.pi * np.arange(length) / float(length)))).reshape(1, -1, 1) if mode == 'in': return fadein * encoding else: return (1.0 - fadein) * encoding fig, axs = plt.subplots(3, 1, figsize=(10, 7)) axs[0].plot(enc1[0]); axs[0].set_title('Original Encoding') axs[1].plot(fade(enc1, 'in')[0]); axs[1].set_title('Fade In') axs[2].plot(fade(enc1, 'out')[0]); axs[2].set_title('Fade Out') ``` Now we can crossfade two different encodings by adding their respective fade-ins and fade-outs: ``` def crossfade(encoding1, encoding2): return fade(encoding1, 'out') + fade(encoding2, 'in') fig, axs = plt.subplots(3, 1, figsize=(10, 7)) axs[0].plot(enc1[0]); axs[0].set_title('Encoding 1') axs[1].plot(enc2[0]); axs[1].set_title('Encoding 2') axs[2].plot(crossfade(enc1, enc2)[0]); axs[2].set_title('Crossfade') ``` Now let's synthesize the resulting encodings: ``` fastgen.synthesize(crossfade(enc1, enc2), save_paths=['crossfade.wav']) ``` There is a lot to explore with NSynth. So far I've just shown you a taste of what's possible when you are able to generate your own sounds. I expect the generation process will soon get much faster, especially with help from the community, and for more unexpected and interesting applications to emerge. Please keep in touch with whatever you end up creating, either personally via [twitter](https://twitter.com/pkmital), in our [Creative Applications of Deep Learning](https://www.kadenze.com/programs/creative-applications-of-deep-learning-with-tensorflow) community on Kadenze, or the [Magenta Google Group](https://groups.google.com/a/tensorflow.org/forum/#!forum/magenta-discuss).
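One more idea to experiment with before moving on (this is not part of the original walkthrough): the 50/50 average above can be generalized into a weighted blend, so you can sweep between the two source sounds. A minimal sketch, assuming `enc1`, `enc2` and `fastgen` from above are still in scope; the output filename is made up for illustration:

```
# Sketch only: weighted blend between the two encodings loaded above.
# t = 0.0 gives pure enc1 (breakbeat), t = 1.0 gives pure enc2 (cello).
def interpolate_encodings(encoding1, encoding2, t=0.5):
    return (1.0 - t) * encoding1 + t * encoding2

# e.g. a blend that leans toward the cello
enc_mostly_cello = interpolate_encodings(enc1, enc2, t=0.75)
fastgen.synthesize(enc_mostly_cello, save_paths=['gen_mostly_cello.wav'])
```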
### Multiple Regression <br> a - alpha<br> b - beta<br> i - ith user<br> e - error term<br> Equation - $y_{i} = \alpha + \beta_{1} x_{i1} + \beta_{2} x_{i2} + \dots + \beta_{k} x_{ik} + \varepsilon_{i}$ beta = [alpha, beta_1, beta_2,..., beta_k]<br> x_i = [1, x_i1, x_i2,..., x_ik]<br> <br> ``` inputs = [[123,123,243],[234,455,578],[454,565,900],[705,456,890]] from typing import List from scratch.linear_algebra import dot, Vector def predict(x:Vector, beta: Vector) -> float: return dot(x,beta) def error(x:Vector, y:float, beta:Vector) -> float: return predict(x,beta) - y def squared_error(x:Vector, y:float, beta:Vector) -> float: return error(x,y,beta) ** 2 x = [1,2,3] y = 30 beta = [4,4,4] assert error(x,y,beta) == -6 assert squared_error(x,y,beta) == 36 def sqerror_gradient(x:Vector, y:float, beta:Vector) -> Vector: err = error(x,y,beta) return [2*err*x_i for x_i in x] assert sqerror_gradient(x,y,beta) == [-12,-24,-36] import random import tqdm from scratch.linear_algebra import vector_mean from scratch.gradient_descent import gradient_step def least_squares_fit(xs:List[Vector], ys:List[float], learning_rate: float=0.001, num_steps: int = 1000, batch_size: int = 1) -> Vector: guess = [random.random() for _ in xs[0]] for _ in tqdm.trange(num_steps, desc='least squares fit'): for start in range(0, len(xs), batch_size): batch_xs = xs[start:start+batch_size] batch_ys = ys[start:start+batch_size] gradient = vector_mean([ sqerror_gradient(x,y,guess) for x,y in zip(batch_xs,batch_ys)]) guess = gradient_step(guess,gradient,-learning_rate) return guess from scratch.statistics import daily_minutes_good from scratch.gradient_descent import gradient_step random.seed(0) learning_rate = 0.001 beta = least_squares_fit(inputs,daily_minutes_good,learning_rate,5000,25) # ERROR ( no 'inputs' variable defined ) inputs = [[123,123,243],[234,455,578],[454,565,900],[705,456,890]] # inputs = [123,123,243,234,455,578,454,565,900,705,456,890] from scratch.simple_linear_regression import total_sum_of_squares def multiple_r_squared(xs:List[Vector], ys:Vector, beta:Vector) -> float: sum_of_squared_errors = sum(error(x,y,beta) ** 2 for x,y in zip(xs,ys)) return 1.0 - sum_of_squared_errors/ total_sum_of_squares(ys) assert 0.67 < multiple_r_squared(inputs, daily_minutes_good, beta) < 0.68 # ERROR ( no 'inputs' variable defined ) ``` <b>Digression: The Bootstrap</b> ``` from typing import TypeVar, Callable X = TypeVar('X') Stat = TypeVar('Stat') def bootstrap_sample(data:List[X]) -> List[X]: return [random.choice(data) for _ in data] def bootstrap_statistics(data:List[X], stats_fn: Callable[[List[X]],Stat], num_samples: int) -> List[Stat]: return [stats_fn(bootstrap_sample(data)) for _ in range(num_samples)] close_to_100 = [99.5 + random.random() for _ in range(101)] far_from_100 = ([99.5 + random.random()] + [random.random() for _ in range(50)] + [200 + random.random() for _ in range(50)]) from scratch.statistics import median, standard_deviation median_close = bootstrap_statistics(close_to_100,median,100) median_far = bootstrap_statistics(far_from_100,median,100) print(median_close) print(median_far) from typing import Tuple import datetime def estimate_sample_beta(pairs:List[Tuple[Vector,float]]): x_sample = [x for x, _ in pairs] y_sample = [y for _, y in pairs] beta = least_squares_fit(x_sample,y_sample,learning_rate,5000,25) print("bootstrap sample",beta) return beta random.seed(0) bootstrap_betas = bootstrap_statistics(list(zip(inputs, daily_minutes_good)), estimate_sample_beta, 100) # ERROR ( no 'inputs' variable defined )
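# Each bootstrap sample yields its own fitted beta vector; the standard error of
# coefficient i is then estimated as the standard deviation of that coefficient
# across the 100 bootstrap fits collected in bootstrap_betas above.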
bootstrap_standard_errors = [ standard_deviation([beta[i] for beta in bootstrap_betas]) for i in range(4)] print(bootstrap_standard_errors) # ERROR ( no 'inputs' variable defined ) from scratch.probability import normal_cdf def p_value(beta_hat_j: float, sigma_hat_j:float) -> float: if beta_hat_j > 0: return 2 * (1 - normal_cdf(beta_hat_j/sigma_hat_j)) else: return 2 * normal_cdf(beta_hat_j/sigma_hat_j) assert p_value(30.58, 1.27) < 0.001 # constant term assert p_value(0.972, 0.103) < 0.001 # num_friends ``` <b>Regularization</b> ``` def ridge_penalty(beta:Vector, alpha:float)->float: return alpha*dot(beta[1:],beta[1:]) def squared_error_ridge(x: Vector, y: float, beta: Vector, alpha: float) -> float: return error(x, y, beta) ** 2 + ridge_penalty(beta, alpha) from scratch.linear_algebra import add def ridge_penalty_gradient(beta: Vector, alpha: float) -> Vector: return [0.] + [2 * alpha * beta_j for beta_j in beta[1:]] def sqerror_ridge_gradient(x: Vector, y: float, beta: Vector, alpha: float) -> Vector: return add(sqerror_gradient(x, y, beta), ridge_penalty_gradient(beta, alpha)) def least_squares_fit_ridge(xs:List[Vector], ys:List[float], alpha: float, learning_rate: float=0.001, num_steps: int = 1000, batch_size: int = 1) -> Vector: guess = [random.random() for _ in xs[0]] for _ in tqdm.trange(num_steps, desc='least squares fit'): for start in range(0, len(xs), batch_size): batch_xs = xs[start:start+batch_size] batch_ys = ys[start:start+batch_size] gradient = vector_mean([ sqerror_ridge_gradient(x,y,guess,alpha) for x,y in zip(batch_xs,batch_ys)]) guess = gradient_step(guess,gradient,-learning_rate) return guess random.seed(0) beta_0 = least_squares_fit_ridge(inputs, daily_minutes_good, 0.0, # alpha learning_rate, 5000, 25) # [30.51, 0.97, -1.85, 0.91] assert 5 < dot(beta_0[1:], beta_0[1:]) < 6 assert 0.67 < multiple_r_squared(inputs, daily_minutes_good, beta_0) < 0.69 # ERROR ( no 'inputs' variable defined ) beta_0_1 = least_squares_fit_ridge(inputs, daily_minutes_good, 0.1, # alpha learning_rate, 5000, 25) # [30.8, 0.95, -1.83, 0.54] assert 4 < dot(beta_0_1[1:], beta_0_1[1:]) < 5 assert 0.67 < multiple_r_squared(inputs, daily_minutes_good, beta_0_1) < 0.69 beta_1 = least_squares_fit_ridge(inputs, daily_minutes_good, 1, # alpha learning_rate, 5000, 25) # [30.6, 0.90, -1.68, 0.10] assert 3 < dot(beta_1[1:], beta_1[1:]) < 4 assert 0.67 < multiple_r_squared(inputs, daily_minutes_good, beta_1) < 0.69 beta_10 = least_squares_fit_ridge(inputs, daily_minutes_good,10, # alpha learning_rate, 5000, 25) # [28.3, 0.67, -0.90, -0.01] assert 1 < dot(beta_10[1:], beta_10[1:]) < 2 assert 0.5 < multiple_r_squared(inputs, daily_minutes_good, beta_10) < 0.6 def lasso_penalty(beta, alpha): return alpha * sum(abs(beta_i) for beta_i in beta[1:]) ```
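The notebook stops at the lasso penalty itself. As a companion sketch (not from the original), the corresponding gradient term can be combined with `sqerror_gradient` the same way the ridge version is; because the absolute value is not differentiable at zero, a subgradient (here simply the sign) is used:

```
def lasso_penalty_gradient(beta: Vector, alpha: float) -> Vector:
    # Subgradient of alpha * sum(|beta_j|) for j >= 1; the intercept beta[0] is not penalized.
    return [0.] + [alpha * (1. if beta_j > 0 else -1. if beta_j < 0 else 0.)
                   for beta_j in beta[1:]]

def sqerror_lasso_gradient(x: Vector, y: float, beta: Vector, alpha: float) -> Vector:
    # Gradient of (squared error + lasso penalty) for a single observation.
    return add(sqerror_gradient(x, y, beta), lasso_penalty_gradient(beta, alpha))
```

In practice the lasso is usually fit with coordinate descent rather than plain gradient steps, but this keeps the same structure as `sqerror_ridge_gradient` above.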
## A Two-sample t-test to find differentially expressed miRNAs between normal and tumor tissues in Lung Adenocarcinoma ``` import os import pandas mirna_src_dir = os.getcwd() + "/assn-mirna-luad/data/processed/miRNA/" clinical_src_dir = os.getcwd() + "/assn-mirna-luad/data/processed/clinical/" mirna_tumor_df = pandas.read_csv(mirna_src_dir+'tumor_miRNA.csv') mirna_normal_df = pandas.read_csv(mirna_src_dir+'normal_miRNA.csv') clinical_df = pandas.read_csv(clinical_src_dir+'clinical.csv') print "mirna_tumor_df.shape", mirna_tumor_df.shape print "mirna_normal_df.shape", mirna_normal_df.shape """ Here we select the samples to use for our analysis """ matched_samples = pandas.merge(clinical_df, mirna_normal_df, on='patient_barcode')['patient_barcode'] # print "matched_samples", matched_samples.shape # merged = pandas.merge(clinical_df, mirna_tumor_df, on='patient_barcode') # print merged.shape # print # print merged['histological_type'].value_counts().sort_index(axis=0) # print # print merged['pathologic_stage'].value_counts().sort_index(axis=0) # print # print merged['pathologic_T'].value_counts().sort_index(axis=0) # print # print merged['pathologic_N'].value_counts().sort_index(axis=0) # print # print merged['pathologic_M'].value_counts().sort_index(axis=0) # print from sklearn import preprocessing import numpy as np X_normal = mirna_normal_df[mirna_normal_df['patient_barcode'].isin(matched_samples)].sort_values(by=['patient_barcode']).copy() X_tumor = mirna_tumor_df.copy() X_tumor_matched = mirna_tumor_df[mirna_tumor_df['patient_barcode'].isin(matched_samples)].sort_values(by=['patient_barcode']).copy() X_normal.__delitem__('patient_barcode') X_tumor_matched.__delitem__('patient_barcode') X_tumor.__delitem__('patient_barcode') print "X_normal.shape", X_normal.shape print "X_tumor.shape", X_tumor.shape print "X_tumor_matched.shape", X_tumor_matched.shape mirna_list = X_tumor.columns.values # miRNA feature names (same columns in all three frames) # X_scaler = preprocessing.StandardScaler(with_mean=False).fit(X_tumor) # X_tumor = X_scaler.transform(X_tumor) from scipy.stats import ttest_rel import matplotlib.pyplot as plt ttest = ttest_rel(X_tumor_matched, X_normal) plt.plot(ttest[1], ls='', marker='.') plt.title('Paired t-test between tumor and normal LUAD tissues') plt.ylabel('p-value') plt.xlabel('miRNAs') plt.show() from scipy.stats import ttest_ind ttest_2 = ttest_ind(X_tumor, X_normal) plt.plot(ttest_2[1], ls='', marker='.') plt.title('Independent sample t-test between tumor and normal LUAD tissues') plt.ylabel('p-value') plt.xlabel('miRNAs') plt.show() ```
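With hundreds of miRNAs tested at once, a raw 0.05 cutoff will flag many false positives. A small follow-up sketch (not in the original notebook; Bonferroni is just one simple choice of multiple-testing adjustment) for pulling out the features that remain significant after correction:

```
# Sketch: Bonferroni-corrected selection of miRNAs from the paired test above.
p_values = np.asarray(ttest[1])
threshold = 0.05 / len(p_values)          # Bonferroni: divide by the number of tests
significant_mirnas = np.asarray(mirna_list)[p_values < threshold]
print "number of significant miRNAs:", len(significant_mirnas)
```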
# Step 7: Serve data from OpenAgua into WEAP using WaMDaM #### By Adel M. Abdallah, Dec 2020 Execute the following cells by pressing `Shift-Enter`, or by pressing the play button <img style='display:inline;padding-bottom:15px' src='play-button.png'> on the toolbar above. ## Steps 1. Import Python libraries 2. Import the published SQLite file for the WEAP model from HydroShare 3. Prepare to connect to the WEAP API 4. Connect to the WEAP API to programmatically populate WEAP with data, run it, and get back results 5. Create a copy of the original WEAP Area to use while keeping the original as-is for any later use 6. Export the unmet demand percent into Excel to load it into WaMDaM <a name="Import"></a> # 1. Import Python libraries ``` # 1. Import Python libraries ### set the notebook mode to embed the figures within the cell import numpy import sqlite3 import numpy as np import pandas as pd import getpass from hs_restclient import HydroShare, HydroShareAuthBasic import os import plotly plotly.__version__ import plotly.offline as offline import plotly.graph_objs as go from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot offline.init_notebook_mode(connected=True) from plotly.offline import init_notebook_mode, iplot from plotly.graph_objs import * init_notebook_mode(connected=True) # initiate notebook for offline plot import os import csv from collections import OrderedDict import sqlite3 import pandas as pd import numpy as np from IPython.display import display, Image, SVG, Math, YouTubeVideo import urllib import calendar print 'The needed Python libraries have been imported' ``` # 2. Connect to the WaMDaM SQLite on HydroShare ### Provide the HydroShare ID for your resource, for example: https://www.hydroshare.org/resource/af71ef99a95e47a89101983f5ec6ad8b/ resource_id='85e9fe85b08244198995558fe7d0e294' ``` # enter your HydroShare username and password here between the quotes username = '' password = '' auth = HydroShareAuthBasic(username=username, password=password) hs = HydroShare(auth=auth) print 'Connected to HydroShare' # Then we can run queries against it within this notebook :) resource_url='https://www.hydroshare.org/resource/af71ef99a95e47a89101983f5ec6ad8b/' resource_id= resource_url.split("https://www.hydroshare.org/resource/",1)[1] resource_id=resource_id.replace('/','') print resource_id resource_md = hs.getSystemMetadata(resource_id) # print resource_md print 'Resource title' print(resource_md['resource_title']) print '----------------------------' resources=hs.resource(resource_id).files.all() file = "" for f in hs.resource(resource_id).files.all(): file += f.decode('utf8') import json file_json = json.loads(file) for f in file_json["results"]: FileURL= f["url"] SQLiteFileName=FileURL.split("contents/",1)[1] cwd = os.getcwd() print cwd fpath = hs.getResourceFile(resource_id, SQLiteFileName, destination=cwd) conn = sqlite3.connect(SQLiteFileName,timeout=10) print 'Connected to the SQLite file= '+ SQLiteFileName print 'done' ``` <a name="ConnectWEAP"></a> # 2. Prepare to connect to the WEAP API ### You need to have WEAP already installed on your machine First make sure to have a copy of the Water Evaluation And Planning (WEAP) system installed on your local machine (Windows). If you don’t have it installed, download and install the WEAP software, which allows you to run the Bear River WEAP model and its scenarios for Use Case 5: https://www.weap21.org/. You need to have a WEAP license. See here (https://www.weap21.org/index.asp?action=217).
If you're interested in learning about the WEAP API, check it out here: http://www.weap21.org/WebHelp/API.htm ## Install dependency and register WEAP ### 2.1. Install pywin32 extensions, which provide access to many of the Windows APIs from Python. **Choose one option** * a. Install using an executable based on your Python version. Use the version for Python 2.7: https://github.com/mhammond/pywin32/releases **OR** * b. Install it using the Anaconda terminal: https://anaconda.org/anaconda/pywin32 Type this command in the Anaconda terminal as Administrator conda install -c anaconda pywin32 **OR** * c. Install from source code (for advanced users) https://github.com/mhammond/pywin32 ### 2.2. Register WEAP with Windows This use case only works on a local Jupyter Notebook server installed on your machine along with WEAP. So it does not work on the online Notebooks in Step 2.1. You need to install Jupyter Server in Step 2.2 then proceed here. * **Register WEAP with Windows to allow the WEAP API to be accessed** Use the Windows "Command Prompt". Right click and then <font color=red>**run as Administrator**</font>, navigate to the WEAP installation directory, such as the one below, and then hit Enter ``` cd C:\Program Files (x86)\WEAP ``` Then type the following command in the command prompt and hit Enter ``` WEAP /regserver ``` <img src="https://github.com/WamdamProject/WaMDaM-software-ecosystem/blob/master/mkdocs/Edit_MD_Files/QuerySelect/images/RegisterWEAP_CMD.png?raw=true" style="float:center;width:700px;padding:20px"> Figure 1: Register the WEAP API with Windows using the Command Prompt (Run as Administrator) # 3. Connect Jupyter Notebook to WEAP API Clone or download this GitHub repo: https://github.com/WamdamProject/WaMDaM_UseCases In your local repo folder, go to C:\Users\Adel\Documents\GitHub\WaMDaM_UseCases/UseCases_files/1Original_Datasets_preperation_files/WEAP/Bear_River_WEAP_Model_2017 Copy this folder **Bear_River_WEAP_Model_2017** and paste it into the **WEAP Areas** folder on your local machine.
For example, it is at C:\Users\Adel\Documents\WEAP Areas ``` # this library is needed to connect to the WEAP API import win32com.client # this command will open the WEAP software (if closed) and get the last active model # you could change the active area to another one inside WEAP or by passing it to the command here #WEAP.ActiveArea = "BearRiverFeb2017_V10.9" WEAP=win32com.client.Dispatch("WEAP.WEAPApplication") # WEAP.Visible = 'FALSE' print WEAP.ActiveArea.Name WEAP.ActiveArea = "Bear_River_WEAP_Model_2017_Original" print WEAP.ActiveArea.Name WEAP.Areas("Bear_River_WEAP_Model_2017_Original").Open WEAP.ActiveArea = "Bear_River_WEAP_Model_2017_Original" print WEAP.ActiveArea.Name print 'Connected to WEAP API and the '+ WEAP.ActiveArea.Name + ' Area' print '-------------' if not WEAP.Registered: print "Because WEAP is not registered, you cannot use the API" # get the active WEAP Area (model) to serve data into it # ActiveArea=WEAP.ActiveArea.Name # get the active WEAP scenario to serve data into it print '-------------' ActiveScenario= WEAP.ActiveScenario.Name print '\n ActiveScenario= '+ActiveScenario print '-------------' WEAP_Area_dir=WEAP.AreasDirectory print WEAP_Area_dir print "\n \n You're connected to the WEAP API" ``` <a name="CreateWEAP_Area"></a> # 4. Create a copy of the original WEAP Area to use while keeping the original as-is for any later use <a name="AddScenarios"></a> ### Add a new CacheCountyUrbanWaterUse scenario from the Reference original WEAP Area: ### You can always use this original one and delete any new copies you make afterwards. ``` # Create a copy of the WEAP Area to serve the updated data into it # Delete the Area if it exists and then add it. Start fresh Area="Bear_River_WEAP_Model_2017_Conservation" if not WEAP.Areas.Exists(Area): WEAP.SaveAreaAs(Area) WEAP.ActiveArea.Save WEAP.ActiveArea = "Bear_River_WEAP_Model_2017_Conservation" print 'ActiveArea= '+WEAP.ActiveArea.Name # Add new Scenario # Add(NewScenarioName, ParentScenarioName or Index): # Create a new scenario as a child of the parent scenario specified. # The new scenario will become the selected scenario in the Data View. WEAP=win32com.client.Dispatch("WEAP.WEAPApplication") # WEAP.Visible = FALSE WEAP.ActiveArea = "Bear_River_WEAP_Model_2017_Conservation" print 'ActiveArea= '+ WEAP.ActiveArea.Name Scenarios=[] Scenarios=['Cons25PercCacheUrbWaterUse','Incr25PercCacheUrbWaterUse'] # Delete the scenario if it exists and then add it.
Start fresh for Scenario in Scenarios: if WEAP.Scenarios.Exists(Scenario): # delete it WEAP.Scenarios(Scenario).Delete(True) # add it back as a fresh copy WEAP.Scenarios.Add(Scenario,'Reference') else: WEAP.Scenarios.Add(Scenario,'Reference') WEAP.ActiveArea.Save WEAP.SaveArea WEAP.Quit # or add the scenarios one by one using this command # Make a copy from the reference (base) scenario # WEAP.Scenarios.Add('UpdateCacheDemand','Reference') print '---------------------- \n' print 'Scenarios added to the original WEAP area' WEAP.Quit print 'Connection with WEAP API is disconnected' ``` <a name="QuerySupplyDataLoadWEAP"></a> # 4.A Query Cache County seasonal "Monthly Demand" for the three sites: Logan Potable, North Cache Potable, South Cache Potable ### The data comes from OpenAgua ``` # Use Case 3.1Identify_aggregate_TimeSeriesValues.csv # plot aggregated to monthly and converted to acre-feet time series data of multiple sources # Logan Potable # North Cache Potable # South Cache Potable # 2.2Identify_aggregate_TimeSeriesValues.csv Query_UseCase_URL=""" https://raw.githubusercontent.com/WamdamProject/WaMDaM_JupyterNotebooks/master/3_VisualizePublish/SQL_queries/WEAP/Query_demand_sites.sql """ # Read the query text inside the URL Query_UseCase_text = urllib.urlopen(Query_UseCase_URL).read() # return query result in a pandas data frame result_df_UseCase= pd.read_sql_query(Query_UseCase_text, conn) # uncomment the below line to see the list of attributes # display (result_df_UseCase) seasons_dict = dict() seasons_dict2=dict() Scenarios=['Cons25PercCacheUrbWaterUse','Incr25PercCacheUrbWaterUse'] subsets = result_df_UseCase.groupby(['ScenarioName','InstanceName']) for subset in subsets.groups.keys(): if subset[0] in Scenarios: df_Seasonal = subsets.get_group(name=subset) df_Seasonal=df_Seasonal.reset_index() SeasonalParam = '' for i in range(len(df_Seasonal['SeasonName'])): m_data = df_Seasonal['SeasonName'][i] n_data = float(df_Seasonal['SeasonNumericValue'][i]) SeasonalParam += '{},{}'.format(m_data, n_data) if i != len(df_Seasonal['SeasonName']) - 1: SeasonalParam += ',' Seasonal_value="MonthlyValues("+SeasonalParam+")" seasons_dict[subset]=(Seasonal_value) # seasons_dict2[subset[0]]=seasons_dict # print seasons_dict2 print '-----------------' # print seasons_dict # seasons_dict2.get("Cons25PercCacheUrbWaterUse", {}).get("Logan Potable") # 1 print 'Query and data preparation are done' ``` <a name="LoadFlow"></a> # 4.B Load the seasonal demand data with conservation into WEAP ``` # 9. Load the seasonal data into WEAP #WEAP=win32com.client.Dispatch("WEAP.WEAPApplication") # WEAP.Visible = FALSE print WEAP.ActiveArea.Name Scenarios=['Cons25PercCacheUrbWaterUse','Incr25PercCacheUrbWaterUse'] DemandSites=['Logan Potable','North Cache Potable','South Cache Potable'] AttributeName='Monthly Demand' for scenario in Scenarios: WEAP.ActiveScenario = scenario print WEAP.ActiveScenario.Name for Branch in WEAP.Branches: for InstanceName in DemandSites: if Branch.Name == InstanceName: GetInstanceFullBranch = Branch.FullName val=seasons_dict[(scenario,InstanceName)] WEAP.Branch(GetInstanceFullBranch).Variable(AttributeName).Expression =val # print val print "loaded " + InstanceName WEAP.SaveArea print '\n The data have been successfully loaded into WEAP' WEAP.SaveArea print '\n \n The updated data have been saved' ``` # 5.
Run WEAP <font color=green>**Please wait, it will take ~1-3 minutes** to finish calculating the two WEAP Areas with their many scenarios</font> ``` # Run WEAP WEAP.Areas("Bear_River_WEAP_Model_2017_Conservation").Open print WEAP.ActiveArea.Name WEAP.ActiveArea = "Bear_River_WEAP_Model_2017_Conservation" print WEAP.ActiveArea.Name print 'Please wait 1-3 min for the calculation to finish' WEAP.Calculate(2006,10,True) WEAP.SaveArea print '\n \n The calculation has been done and saved' print WEAP.CalculationTime print '\n \n Done' ``` ## 5.1 Get the unmet demand for Cache County sites in the reference and the conservation/increase scenarios ``` Scenarios=['Reference','Cons25PercCacheUrbWaterUse','Incr25PercCacheUrbWaterUse'] DemandSites=['Logan Potable','North Cache Potable','South Cache Potable'] UnmetDemandEstimate_Ref = pd.DataFrame(columns = DemandSites) UnmetDemandEstimate_Cons25 = pd.DataFrame(columns = DemandSites) UnmetDemandEstimate_Incr25 = pd.DataFrame(columns = DemandSites) UnmetDemandEstimate= pd.DataFrame(columns = Scenarios) for scen in Scenarios: if scen=='Reference': for site in DemandSites: param="\Demand Sites\%s: Unmet Demand[Acre-Foot]"%(site) # print param for year in range (1966,2006): value=WEAP.ResultValue(param, year, 1, scen, year, WEAP.NumTimeSteps) UnmetDemandEstimate_Ref.loc[year, [site]]=value elif scen=='Cons25PercCacheUrbWaterUse': for site in DemandSites: param="\Demand Sites\%s: Unmet Demand[Acre-Foot]"%(site) # print param for year in range (1966,2006): value=WEAP.ResultValue(param, year, 1, scen, year, WEAP.NumTimeSteps) UnmetDemandEstimate_Cons25.loc[year, [site]]=value elif scen=='Incr25PercCacheUrbWaterUse': for site in DemandSites: param="\Demand Sites\%s: Unmet Demand[Acre-Foot]"%(site) # print param for year in range (1966,2006): value=WEAP.ResultValue(param, year, 1, scen, year, WEAP.NumTimeSteps) UnmetDemandEstimate_Incr25.loc[year, [site]]=value UnmetDemandEstimate_Ref['Cache Total']=UnmetDemandEstimate_Ref[DemandSites].sum(axis=1) UnmetDemandEstimate_Cons25['Cache Total']=UnmetDemandEstimate_Cons25[DemandSites].sum(axis=1) UnmetDemandEstimate_Incr25['Cache Total']=UnmetDemandEstimate_Incr25[DemandSites].sum(axis=1) UnmetDemandEstimate['Reference']=UnmetDemandEstimate_Ref['Cache Total'] UnmetDemandEstimate['Cons25PercCacheUrbWaterUse']=UnmetDemandEstimate_Cons25['Cache Total'] UnmetDemandEstimate['Incr25PercCacheUrbWaterUse']=UnmetDemandEstimate_Incr25['Cache Total'] UnmetDemandEstimate=UnmetDemandEstimate.rename_axis('Year',axis="columns") print 'Done estimating the unmet demand for each scenario' # display(UnmetDemandEstimate) ``` ## 5.2 Get the unmet demand as a percentage for the scenarios ``` ######################################################################## # estimate the total reference demand for Cache County to calculate the percentage result_df_UseCase= pd.read_sql_query(Query_UseCase_text, conn) subsets = result_df_UseCase.groupby(['ScenarioName']) for subset in subsets.groups.keys(): if subset=='Bear River WEAP Model 2017': # reference df_Seasonal = subsets.get_group(name=subset) df_Seasonal=df_Seasonal.reset_index() # display (df_Seasonal) Tot=df_Seasonal["SeasonNumericValue"].tolist() float_lst = [float(x) for x in Tot] Annual_Demand=sum(float_lst) print Annual_Demand ######################################################################## years =UnmetDemandEstimate.index.values Reference_vals =UnmetDemandEstimate['Reference'].tolist() Reference_vals_perc =((numpy.array([Reference_vals]))/Annual_Demand)*100
Cons25PercCacheUrbWaterUse_vals =UnmetDemandEstimate['Cons25PercCacheUrbWaterUse'].tolist() Cons25PercCacheUrbWaterUse_vals_perc =((numpy.array([Cons25PercCacheUrbWaterUse_vals]))/Annual_Demand)*100 Incr25PercCacheUrbWaterUse_vals =UnmetDemandEstimate['Incr25PercCacheUrbWaterUse'].tolist() Incr25PercCacheUrbWaterUse_vals_perc =((numpy.array([Incr25PercCacheUrbWaterUse_vals]))/Annual_Demand)*100 print 'done estimating the unmet demand percentages' ``` # 5.3 Export the unmet demand percent into Excel to load it into WaMDaM ``` # display(UnmetDemandEstimate) import xlsxwriter from collections import OrderedDict UnmetDemandEstimate.to_csv('UnmetDemandEstimate.csv') ExcelFileName='Test.xlsx' years =UnmetDemandEstimate.index.values #print years Columns=['ObjectType','InstanceName','ScenarioName','AttributeName','DateTimeStamp','Value'] # these three columns have fixed values for all the rows ObjectType='Demand Site' InstanceName='Cache County Urban' AttributeName='UnmetDemand' # this dict contains the keys (scenario names) and the values as lists # years exist in UnmetDemandEstimate. We then need to add day and month to the year date # like this format: # DateTimeStamp= 1/1/1993 Scenarios = OrderedDict() Scenarios['Bear River WEAP Model 2017_result'] = Reference_vals_perc Scenarios['Incr25PercCacheUrbWaterUse_result'] = Incr25PercCacheUrbWaterUse_vals_perc Scenarios['Cons25PercCacheUrbWaterUse_result'] = Cons25PercCacheUrbWaterUse_vals_perc #print Incr25PercCacheUrbWaterUse_vals_perc workbook = xlsxwriter.Workbook(ExcelFileName) sheet = workbook.add_worksheet('sheet') # write headers for i, header_name in enumerate(Columns): sheet.write(0, i, header_name) row = 1 col = 0 for scenario_name in Scenarios.keys(): for val_list in Scenarios[scenario_name]: # print val_list for i, val in enumerate(val_list): # print years[i] date_timestamp = '1/1/{}'.format(years[i]) sheet.write(row, 0, ObjectType) sheet.write(row, 1, InstanceName) sheet.write(row, 2, scenario_name) sheet.write(row, 3, AttributeName) sheet.write(row, 4, date_timestamp) sheet.write(row, 5, val) row += 1 workbook.close() print 'done writing to Excel' print 'Next, copy the exported data into a WaMDaM workbook template for the WEAP model' ``` # 6.
Plot the unmet demand for all the scenarios and years ``` trace2 = go.Scatter( x=years, y=Reference_vals_perc[0], name = 'Reference demand', mode = 'lines+markers', marker = dict( color = '#264DFF', )) trace3 = go.Scatter( x=years, y=Cons25PercCacheUrbWaterUse_vals_perc[0], name = 'Conserve demand by 25%', mode = 'lines+markers', marker = dict( color = '#3FA0FF' )) trace1 = go.Scatter( x=years, y=Incr25PercCacheUrbWaterUse_vals_perc[0], name = 'Increase demand by 25%', mode = 'lines+markers', marker = dict( color = '#290AD8' )) layout = dict( #title = "Use Case 3.3", yaxis = dict( title = "Annual unmet demand (%)", tickformat= ',', showline=True, dtick='5', ticks='outside', ticklen=10, tickcolor='#000', gridwidth=1, showgrid=True, ), xaxis = dict( # title = "Updated input parameters in the <br>Bear_River_WEAP_Model_2017", # showline=True, ticks='inside', tickfont=dict(size=22), tickcolor='#000', gridwidth=1, showgrid=True, ticklen=25 ), legend=dict( x=0.05,y=1.1, bordercolor='#00000f', borderwidth=2 ), width=1100, height=700, #paper_bgcolor='rgb(233,233,233)', #plot_bgcolor='rgb(233,233,233)', margin=go.Margin(l=130,b=200), font=dict(size=25,family='arial',color='#00000f'), showlegend=True ) data = [trace1, trace2,trace3] # create a figure object fig = dict(data=data, layout=layout) #py.iplot(fig, filename = "2.3Identify_SeasonalValues") ## it can be run from the local machine in PyCharm as shown below ## It would also work here offline but in a separate window offline.iplot(fig,filename = 'jupyter/UnmentDemand@BirdRefuge' ) print "Figure x is replicated!!" ``` <a name="Close"></a> # 7. Upload the new result scenarios to OpenAgua to visualize them there You already uploaded the results from the WaMDaM SQLite earlier at the beginning of these Jupyter Notebooks. So all you need to do is select the result to display in OpenAgua. Finally, click Load data. It should replicate the same figure above and Figure 6 in the paper. <img src="https://github.com/WamdamProject/WaMDaM-software-ecosystem/blob/master/mkdocs/Edit_MD_Files/images/WEAP_results_OA.PNG?raw=true" style="float:center;width:900px;padding:20px"> <img src="https://github.com/WamdamProject/WaMDaM-software-ecosystem/blob/master/mkdocs/Edit_MD_Files/images/WEAP_results_OA2.PNG?raw=true" style="float:center;width:900px;padding:20px"> <a name="Close"></a> # 8. Close the SQLite and WEAP API connections ``` # 9. Close the SQLite and WEAP API connections conn.close() print 'connection disconnected' # Uncomment WEAP.SaveArea # this command will close WEAP WEAP.Quit print 'Connection with WEAP API is disconnected' ``` # The End :) Congratulations!
``` import argparse import time from collections import defaultdict from pathlib import Path import h5py import fastmri import fastmri.data.transforms as T import numpy as np import requests import torch from fastmri.data import SliceDataset from fastmri.models import Unet from tqdm import tqdm # loading multi coil knee file fname = '/scratch/svangurp/samuel/data/knee/train/file1000002.h5' data = h5py.File(fname, 'r') kspace = data["kspace"][()] def run_unet_model(batch, model, device): image, _, mean, std, fname, slice_num, _ = batch output = model(image.to(device).unsqueeze(1)).squeeze(1).cpu() mean = mean.unsqueeze(1).unsqueeze(2) std = std.unsqueeze(1).unsqueeze(2) output = (output * std + mean).cpu() return output, int(slice_num[0]), fname[0] def run_inference(challenge, state_dict_file, data_path, output_path, device): model = Unet(in_chans=1, out_chans=1, chans=256, num_pool_layers=4, drop_prob=0.0) # download the state_dict if we don't have it if state_dict_file is None: if not Path(MODEL_FNAMES[challenge]).exists(): url_root = UNET_FOLDER download_model(url_root + MODEL_FNAMES[challenge], MODEL_FNAMES[challenge]) state_dict_file = MODEL_FNAMES[challenge] model.load_state_dict(torch.load(state_dict_file)) model = model.eval() # data loader setup if "_mc" in challenge: data_transform = T.UnetDataTransform(which_challenge="multicoil") else: data_transform = T.UnetDataTransform(which_challenge="singlecoil") if "_mc" in challenge: dataset = SliceDataset( root=data_path, transform=data_transform, challenge="multicoil", ) else: dataset = SliceDataset( root=data_path, transform=data_transform, challenge="singlecoil", ) dataloader = torch.utils.data.DataLoader(dataset, num_workers=4) # run the model start_time = time.perf_counter() outputs = defaultdict(list) model = model.to(device) for batch in tqdm(dataloader, desc="Running inference"): with torch.no_grad(): output, slice_num, fname = run_unet_model(batch, model, device) outputs[fname].append((slice_num, output)) # save outputs for fname in outputs: outputs[fname] = np.stack([out for _, out in sorted(outputs[fname])]) fastmri.save_reconstructions(outputs, output_path / "reconstructions") end_time = time.perf_counter() print(f"Elapsed time for {len(dataloader)} slices: {end_time-start_time}") if __name__ == "__main__": parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument( "--challenge", default="unet_knee_sc", choices=( "unet_knee_sc", "unet_knee_mc", "unet_brain_mc", ), type=str, help="Model to run", ) parser.add_argument( "--device", default="cuda", type=str, help="Model to run", ) parser.add_argument( "--state_dict_file", default=None, type=Path, help="Path to saved state_dict (will download if not provided)", ) parser.add_argument( "--data_path", type=Path, required=True, help="Path to subsampled data", ) parser.add_argument( "--output_path", type=Path, required=True, help="Path for saving reconstructions", ) args = parser.parse_args() run_inference( args.challenge, args.state_dict_file, args.data_path, args.output_path, torch.device(args.device), ) challenge = 'unet_knee_mc' state_dict_file ='/home/svangurp/scratch/samuel/pretrained/knee/unet/knee_mc_leaderboard_state_dict.pt' data_path = '/scratch/svangurp/samuel/data/knee/train/' output_path ='/home/svangurp/scratch/samuel/data/knee/model_ouputs/Unet_recon_knee_mc/' device = 'cuda' run_inference(challenge, state_dict_file, data_path, output_path, device) ```
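One caveat when running the cell above inside a notebook: `__name__` is `"__main__"` there too, so `parser.parse_args()` will try to read the kernel's command line and fail on the required `--data_path`/`--output_path` flags before the direct `run_inference(...)` call at the bottom is reached. A minimal, hedged workaround sketch (not part of the original code) is to only parse CLI arguments when those flags are actually present:

```
# Sketch only: guard CLI parsing so the same cell works as a script and in a notebook,
# where sys.argv holds the kernel's arguments rather than the flags defined above.
import sys

def running_as_script():
    return '--data_path' in sys.argv and '--output_path' in sys.argv

if __name__ == "__main__" and running_as_script():
    args = parser.parse_args()   # safe here: the required flags are present
    run_inference(args.challenge, args.state_dict_file, args.data_path,
                  args.output_path, torch.device(args.device))
```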
# Optimizing building HVAC with Amazon SageMaker RL ``` import sagemaker import boto3 from sagemaker.rl import RLEstimator from source.common.docker_utils import build_and_push_docker_image ``` ## Initialize Amazon SageMaker ``` role = sagemaker.get_execution_role() sm_session = sagemaker.session.Session() # SageMaker SDK creates a default bucket. Change this bucket to your own bucket, if needed. s3_bucket = sm_session.default_bucket() s3_output_path = f's3://{s3_bucket}' print(f'S3 bucket path: {s3_output_path}') print(f'Role: {role}') ``` ## Set additional training parameters ### Set instance type Set `cpu_or_gpu` to either `'cpu'` or `'gpu'` for using CPU or GPU instances. ### Configure the framework you want to use Set `framework` to `'tf'` or `'torch'` for TensorFlow or PyTorch, respectively. You will also have to edit your entry point i.e., `train-sagemaker-distributed.py` with the configuration parameter `"use_pytorch"` to match the framework that you have selected. ``` job_name_prefix = 'energyplus-hvac-ray' cpu_or_gpu = 'gpu' # has to be either cpu or gpu if cpu_or_gpu != 'cpu' and cpu_or_gpu != 'gpu': raise ValueError('cpu_or_gpu has to be either cpu or gpu') framework = 'tf' instance_type = 'ml.g4dn.16xlarge' # g4dn.16x large has 1 GPU and 64 cores ``` # Train your homogeneous scaling job here ### Edit the training code The training code is written in the file `train-sagemaker-distributed.py` which is uploaded in the /source directory. *Note that ray will automatically set `"ray_num_cpus"` and `"ray_num_gpus"` in `_get_ray_config`* ``` !pygmentize source/train-sagemaker-distributed.py ``` ### Train the RL model using the Python SDK Script mode When using SageMaker for distributed training, you can select a GPU or CPU instance. The RLEstimator is used for training RL jobs. 1. Specify the source directory where the environment, presets and training code is uploaded. 2. Specify the entry point as the training code 3. Specify the image (CPU or GPU) to be used for the training environment. 4. Define the training parameters such as the instance count, job name, S3 path for output and job name. 5. Define the metrics definitions that you are interested in capturing in your logs. These can also be visualized in CloudWatch and SageMaker Notebooks. 
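As a quick illustration of step 5 (the log line below is hypothetical, not actual training output), each entry in the `metric_definitions` list defined in the next cell is just a regex that SageMaker applies to the container logs to extract a numeric time series:

```
# Hypothetical log line; the regex is the same one used for episode_reward_mean below.
import re

log_line = 'episode_reward_mean: 42.7'
pattern = r'episode_reward_mean: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'
print(re.search(pattern, log_line).group(1))  # -> '42.7', recorded as the metric value
```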
#### GPU docker image ``` # Build image repository_short_name = f'sagemaker-hvac-ray-{cpu_or_gpu}' docker_build_args = { 'CPU_OR_GPU': cpu_or_gpu, 'AWS_REGION': boto3.Session().region_name, 'FRAMEWORK': framework } image_name = build_and_push_docker_image(repository_short_name, build_args=docker_build_args) print("Using ECR image %s" % image_name) metric_definitions = [ {'Name': 'training_iteration', 'Regex': 'training_iteration: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'}, {'Name': 'episodes_total', 'Regex': 'episodes_total: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'}, {'Name': 'num_steps_trained', 'Regex': 'num_steps_trained: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'}, {'Name': 'timesteps_total', 'Regex': 'timesteps_total: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'}, {'Name': 'training_iteration', 'Regex': 'training_iteration: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'}, {'Name': 'episode_reward_max', 'Regex': 'episode_reward_max: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'}, {'Name': 'episode_reward_mean', 'Regex': 'episode_reward_mean: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'}, {'Name': 'episode_reward_min', 'Regex': 'episode_reward_min: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'}, ] ``` ### Ray homogeneous scaling - Specify `train_instance_count` > 1 Homogeneous scaling allows us to use multiple instances of the same type. Spot instances are unused EC2 instances that could be used at 90% discount compared to On-Demand prices (more information about spot instances can be found [here](https://aws.amazon.com/ec2/spot/?cards.sort-by=item.additionalFields.startDateTime&cards.sort-order=asc) and [here](https://docs.aws.amazon.com/sagemaker/latest/dg/model-managed-spot-training.html)) To use spot instances, set `train_use_spot_instances = True`. To use On-Demand instances, `train_use_spot_instances = False`. ``` hyperparameters = { # no. of days to simulate. Remember to adjust the dates in RunPeriod of # 'source/eplus/envs/buildings/MediumOffice/RefBldgMediumOfficeNew2004_Chicago.idf' to match simulation days. 'n_days': 365, 'n_iter': 50, # no. of training iterations 'algorithm': 'APEX_DDPG', # only APEX_DDPG and PPO are tested 'multi_zone_control': True, # if each zone temperature set point has to be independently controlled 'energy_temp_penalty_ratio': 10 } # Set additional training parameters training_params = { 'base_job_name': job_name_prefix, 'train_instance_count': 1, 'tags': [{'Key': k, 'Value': str(v)} for k,v in hyperparameters.items()] } # Defining the RLEstimator estimator = RLEstimator(entry_point=f'train-sagemaker-hvac.py', source_dir='source', dependencies=["source/common/"], image_uri=image_name, role=role, train_instance_type=instance_type, # train_instance_type='local', output_path=s3_output_path, metric_definitions=metric_definitions, hyperparameters=hyperparameters, **training_params ) estimator.fit(wait=False) print(' ') print(estimator.latest_training_job.job_name) print('type=', instance_type, 'count=', training_params['train_instance_count']) print(' ') ```
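Because `fit(wait=False)` returns immediately, the training job keeps running in the background. A small sketch for polling its status with boto3, using the job name printed above (the describe call and status values are standard SageMaker API behavior):

```
# Poll the asynchronous training job started by estimator.fit(wait=False).
import boto3

sm_client = boto3.client('sagemaker')
job_name = estimator.latest_training_job.job_name
status = sm_client.describe_training_job(TrainingJobName=job_name)['TrainingJobStatus']
print(f'{job_name}: {status}')  # e.g. InProgress, Completed, Failed
```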
# Spleen 3D segmentation with MONAI This tutorial demonstrates how MONAI can be used in conjunction with the [PyTorch Lightning](https://github.com/PyTorchLightning/pytorch-lightning) framework. We demonstrate use of the following MONAI features: 1. Transforms for dictionary format data. 2. Loading NIfTI images with metadata. 3. Adding a channel dimension to the data if it is missing. 4. Scaling medical image intensity to an expected range. 5. Cropping out a batch of balanced images based on the positive / negative label ratio. 6. Caching IO and transforms to accelerate training and validation. 7. Use of a 3D UNet model, Dice loss function, and mean Dice metric for a 3D segmentation task. 8. The sliding window inference method. 9. Deterministic training for reproducibility. The training Spleen dataset used in this example can be downloaded from http://medicaldecathlon.com/ ![spleen](http://medicaldecathlon.com/img/spleen0.png) Target: Spleen Modality: CT Size: 61 3D volumes (41 Training + 20 Testing) Source: Memorial Sloan Kettering Cancer Center Challenge: Large ranging foreground size In addition to the usual MONAI requirements you will need Lightning installed. ``` ! pip install pytorch-lightning # Copyright 2020 MONAI Consortium # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys import glob import numpy as np import torch from torch.utils.data import DataLoader import matplotlib.pyplot as plt import monai from monai.transforms import \ Compose, LoadNiftid, AddChanneld, ScaleIntensityRanged, RandCropByPosNegLabeld, \ RandAffined, Spacingd, Orientationd, ToTensord from monai.data import list_data_collate, sliding_window_inference from monai.networks.layers import Norm from monai.metrics import compute_meandice from pytorch_lightning import LightningModule, Trainer, loggers from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint monai.config.print_config() ``` ## Define the LightningModule The LightningModule contains a refactoring of your training code.
The following module is a refactoring of the code in `spleen_segmentation_3d.ipynb`: ``` class Net(LightningModule): def __init__(self): super().__init__() self._model = monai.networks.nets.UNet(dimensions=3, in_channels=1, out_channels=2, channels=(16, 32, 64, 128, 256), strides=(2, 2, 2, 2), num_res_units=2, norm=Norm.BATCH) self.loss_function = monai.losses.DiceLoss(to_onehot_y=True, do_softmax=True) self.best_val_dice = 0 self.best_val_epoch = 0 def forward(self, x): return self._model(x) def prepare_data(self): # set up the correct data path data_root = '/workspace/data/medical/Task09_Spleen' train_images = glob.glob(os.path.join(data_root, 'imagesTr', '*.nii.gz')) train_labels = glob.glob(os.path.join(data_root, 'labelsTr', '*.nii.gz')) data_dicts = [{'image': image_name, 'label': label_name} for image_name, label_name in zip(train_images, train_labels)] train_files, val_files = data_dicts[:-9], data_dicts[-9:] # define the data transforms train_transforms = Compose([ LoadNiftid(keys=['image', 'label']), AddChanneld(keys=['image', 'label']), Spacingd(keys=['image', 'label'], pixdim=(1.5, 1.5, 2.), interp_order=(3, 0)), Orientationd(keys=['image', 'label'], axcodes='RAS'), ScaleIntensityRanged(keys=['image'], a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True), # randomly crop out patch samples from big image based on pos / neg ratio # the image centers of negative samples must be in valid image area RandCropByPosNegLabeld(keys=['image', 'label'], label_key='label', size=(96, 96, 96), pos=1, neg=1, num_samples=4, image_key='image', image_threshold=0), # user can also add other random transforms # RandAffined(keys=['image', 'label'], mode=('bilinear', 'nearest'), prob=1.0, spatial_size=(96, 96, 96), # rotate_range=(0, 0, np.pi/15), scale_range=(0.1, 0.1, 0.1)), ToTensord(keys=['image', 'label']) ]) val_transforms = Compose([ LoadNiftid(keys=['image', 'label']), AddChanneld(keys=['image', 'label']), Spacingd(keys=['image', 'label'], pixdim=(1.5, 1.5, 2.), interp_order=(3, 0)), Orientationd(keys=['image', 'label'], axcodes='RAS'), ScaleIntensityRanged(keys=['image'], a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True), ToTensord(keys=['image', 'label']) ]) # set deterministic training for reproducibility train_transforms.set_random_state(seed=0) torch.manual_seed(0) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False # we use cached datasets - these are 10x faster than regular datasets self.train_ds = monai.data.CacheDataset(data=train_files, transform=train_transforms, cache_rate=1.0) self.val_ds = monai.data.CacheDataset(data=val_files, transform=val_transforms, cache_rate=1.0) #self.train_ds = monai.data.Dataset(data=train_files, transform=train_transforms) #self.val_ds = monai.data.Dataset(data=val_files, transform=val_transforms) def train_dataloader(self): train_loader = DataLoader(self.train_ds, batch_size=2, shuffle=True, num_workers=4, collate_fn=list_data_collate) return train_loader def val_dataloader(self): val_loader = DataLoader(self.val_ds, batch_size=1, num_workers=4) return val_loader def configure_optimizers(self): optimizer = torch.optim.Adam(self._model.parameters(), 1e-4) return optimizer def training_step(self, batch, batch_idx): images, labels = batch['image'], batch['label'] output = self.forward(images) loss = self.loss_function(output, labels) tensorboard_logs = {'train_loss': loss.item()} return {'loss': loss, 'log': tensorboard_logs} def validation_step(self, batch, batch_idx): images, labels = batch['image'], batch['label'] 
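        # Validation runs whole volumes through sliding-window inference: the 160^3 ROI below
        # is scanned across each image in batches of `sw_batch_size` patches, the stitched
        # prediction is scored with the Dice loss, and mean Dice (background excluded) is
        # computed per batch for aggregation in validation_epoch_end.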
roi_size = (160, 160, 160) sw_batch_size = 4 outputs = sliding_window_inference(images, roi_size, sw_batch_size, self.forward) loss = self.loss_function(outputs, labels) value = compute_meandice(y_pred=outputs, y=labels, include_background=False, to_onehot_y=True, mutually_exclusive=True) return {'val_loss': loss, 'val_dice': value} def validation_epoch_end(self, outputs): val_dice = 0 num_items = 0 for output in outputs: val_dice += output['val_dice'].sum().item() num_items += len(output['val_dice']) mean_val_dice = val_dice / num_items tensorboard_logs = {'val_dice': mean_val_dice} if mean_val_dice > self.best_val_dice: self.best_val_dice = mean_val_dice self.best_val_epoch = self.current_epoch print('current epoch %d current mean dice: %0.4f best mean dice: %0.4f at epoch %d' % (self.current_epoch, mean_val_dice, self.best_val_dice, self.best_val_epoch)) return {'log': tensorboard_logs} ``` ## Run the training ``` # initialise the LightningModule net = Net() # set up loggers and checkpoints tb_logger = loggers.TensorBoardLogger(save_dir='logs') checkpoint_callback = ModelCheckpoint(filepath='logs/{epoch}-{val_loss:.2f}-{val_dice:.2f}') # initialise Lightning's trainer. trainer = Trainer(gpus=[0], max_epochs=250, logger=tb_logger, checkpoint_callback=checkpoint_callback, show_progress_bar=False, num_sanity_val_steps=1 ) # train trainer.fit(net) print('train completed, best_metric: %0.4f at epoch %d' % (net.best_val_dice, net.best_val_epoch)) ``` ## View training in tensorboard ``` %load_ext tensorboard %tensorboard --logdir='logs' ``` ## Check best model output with the input image and label ``` net.eval() device = torch.device("cuda:0") with torch.no_grad(): for i, val_data in enumerate(net.val_dataloader()): roi_size = (160, 160, 160) sw_batch_size = 4 val_outputs = sliding_window_inference(val_data['image'].to(device), roi_size, sw_batch_size, net) # plot the slice [:, :, 50] plt.figure('check', (18, 6)) plt.subplot(1, 3, 1) plt.title('image ' + str(i)) plt.imshow(val_data['image'][0, 0, :, :, 50], cmap='gray') plt.subplot(1, 3, 2) plt.title('label ' + str(i)) plt.imshow(val_data['label'][0, 0, :, :, 50]) plt.subplot(1, 3, 3) plt.title('output ' + str(i)) plt.imshow(torch.argmax(val_outputs, dim=1).detach().cpu()[0, :, :, 50]) plt.show() ```
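The cell above reuses the in-memory `net` from training. If the kernel has been restarted, you could instead reload the weights saved by `ModelCheckpoint` under `logs/`; a sketch (the filename below is a placeholder, and the `best_model_path` attribute depends on your PyTorch Lightning version):

```
# Sketch: reload the best checkpoint written by ModelCheckpoint instead of reusing `net`.
best_ckpt = 'logs/epoch=123-val_loss=0.10-val_dice=0.94.ckpt'  # placeholder filename
net = Net.load_from_checkpoint(best_ckpt)
net.eval()
```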
# Westeros Tutorial Part 1 - Welcome to the MESSAGEix framework & Creating a baseline scenario ### *Integrated Assessment Modeling for the 21st Century* For information on how to install *MESSAGEix*, please refer to the [Installation page](https://message.iiasa.ac.at/en/stable/getting_started.html), and for getting the *MESSAGEix* tutorials, please follow the steps mentioned in [Tutorials](https://message.iiasa.ac.at/en/stable/tutorials.html). Please refer to the [user guidelines](https://github.com/iiasa/message_ix/blob/master/NOTICE.rst) for additional information on using *MESSAGEix*, including the recommended citation and how to name new models. **Structure of these tutorials.** After having run this baseline tutorial, you are able to start with any of the other tutorials, but we recommend following the order below for going through the information step-wise: 1. Baseline tutorial (``westeros_baseline.ipynb``) 2. Add extra detail and constraints to the model 1. Emissions 1. Introducing emissions (`westeros_emissions_bounds.ipynb`) 2. Introducing taxes on emissions (`westeros_emissions_taxes.ipynb`) 2. Add firm capacity (``westeros_firm_capacity.ipynb``) 3. Add flexible energy generation (``westeros_flexible_generation.ipynb``) 4. Add seasonality as an example of temporal variability (``westeros_seasonality.ipynb``) 3. Post-processing: learn how to report calculations _after_ the MESSAGE model has run (``westeros_report.ipynb``) **Pre-requisites** - Have successfully installed *MESSAGEix*. _This tutorial is based on a presentation by Matthew Gidden ([@gidden](https://github.com/gidden)) for a summer school at the **Centre National de la Recherche Scientifique (CNRS)** on *Integrated Assessment Modeling* in June 2018._ ## Scope of this tutorial: Building a Simple Energy Model The goal of this tutorial is to build a simple energy model using *MESSAGEix* with minimal features that can be expanded in future tutorials. We will build the model component by component, focusing on both the **how** (code implementation) and **why** (mathematical formulation). ## Online documentation The full framework documentation is available at [https://message.iiasa.ac.at](https://message.iiasa.ac.at) <img src='_static/doc_page.png'> ## A stylized reference energy system model for Westeros This tutorial is based on the country of Westeros from the TV show "Game of Thrones". <table align='center'><tr><td><img src='_static/westeros.jpg' width='150'></td><td><img src='_static/base_res.png'></td></tr></table> ## MESSAGEix: the mathematical paradigm At its core, *MESSAGEix* is an optimization problem: > $\min \quad ~c^T \cdot x$ > $~s.t. \quad A \cdot x \leq b$ More explicitly, the model... - optimizes an **objective function**, nominally minimizing total **system costs** - under a system of **constraints** (inequalities or equality conditions) The mathematical implementation includes a number of features that make it particularly geared towards the modelling of *energy-water-land systems* in the context of *climate change mitigation and sustainable development*.
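To make the abstract form above concrete, here is a toy instance (purely illustrative, not part of the Westeros model itself): a single demand $d$ that can be met by two technologies with different costs, where wind is capped at an availability limit $\bar{w}$:

$$\min_{x_{coal},\, x_{wind}} \; 30\, x_{coal} + 20\, x_{wind} \quad \text{s.t.} \quad x_{coal} + x_{wind} \geq d, \quad x_{wind} \leq \bar{w}, \quad x_{coal},\, x_{wind} \geq 0$$

The solver fills demand with the cheaper technology first, up to its limit, and covers the rest with the more expensive one; the real model does the same thing, just with many more variables and constraints.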
Throughout this document, the mathematical formulation follows the convention that - decision **VARIABLES** ($x$) are capitalized - input **parameters** ($A$, $b$) are lower case ## MESSAGEix: connected to the *ix modeling platform (ixmp)* The *modeling platform for integrated and cross-cutting analysis* (ixmp) provides a powerful framework for working with scenarios, including a database infrastucture for data version control and interfaces to scientific programming languages. <img src='_static/message_ixmp.png' width='700'> ## Ready, steady, go! First, we import all the packages we need. We import a utility function called *make_df*, which can be used to wrap the input data into dataframes that can be saved in model parameters. ``` import pandas as pd import ixmp import message_ix from message_ix.utils import make_df %matplotlib inline ``` The *MESSAGEix* model is built using the *ixmp* `Platform`. The `Platform` is your connection to a database for storing model input data and scenario results. ``` mp = ixmp.Platform() ``` Once connected, we create a new `Scenario` to build our model. A `Scenario` instance will contain all the model input data and results. ``` scenario = message_ix.Scenario(mp, model='Westeros Electrified', scenario='baseline', version='new') ``` ## Model Structure We start by defining basic characteristics of the model, including time, space, and the energy system structure. The model horizon will span 3 decades (690-720). Let's assume that we're far in the future after the events of A Song of Ice and Fire (which occur ~300 years after Aegon the conqueror). | Math Notation | Model Meaning | |---------------|------------------------------| | $y \in Y^H$ | time periods in history | | $y \in Y^M$ | time periods in model horizon| ``` history = [690] model_horizon = [700, 710, 720] scenario.add_horizon({'year': history + model_horizon, 'firstmodelyear': model_horizon[0]}) ``` Our model will have a single `node`, i.e., its spatial dimension. | Math Notation | Model Meaning| |---------------|--------------| | $n \in N$ | node | ``` country = 'Westeros' scenario.add_spatial_sets({'country': country}) ``` And we fill in the energy system's `commodities`, `levels`, `technologies`, and `modes` (i.e., modes of operation of technologies). This information defines how certain technologies operate. | Math Notation | Model Meaning| |---------------|--------------| | $c \in C$ | commodity | | $l \in L$ | level | | $t \in T$ | technology | | $m \in M$ | mode | ``` scenario.add_set("commodity", ["electricity", "light"]) scenario.add_set("level", ["secondary", "final", "useful"]) scenario.add_set("technology", ['coal_ppl', 'wind_ppl', 'grid', 'bulb']) scenario.add_set("mode", "standard") ``` ## Supply and Demand (or Balancing Commodities) The fundamental premise of the model is to satisfy demand for energy (services). To first order, demand for services like electricity track with economic productivity (GDP). We define a GDP profile similar to first-world GDP growth from [1900-1930](https://en.wikipedia.org/wiki/List_of_regions_by_past_GDP): ``` gdp_profile = pd.Series([1., 1.5, 1.9], index=pd.Index(model_horizon, name='Time')) gdp_profile.plot(title='Demand') ``` The `COMMODITY_BALANCE_GT` and `COMMODITY_BALANCE_LT` equations ensure that `demand` for each `commodity` is met at each `level` in the energy system. 
The equation is copied below in this tutorial notebook, but every model equation is available for reference in the [Mathematical formulation](https://message.iiasa.ac.at/en/stable/model/MESSAGE/model_core.html#) section of the *MESSAGEix* documentation. $\sum_{\substack{n^L,t,m \\ y^V \leq y}} \text{output}_{n^L,t,y^V,y,m,n,c,l} \cdot \text{ACT}_{n^L,t,y^V,y,m}$ $- \sum_{\substack{n^L,t,m \\ y^V \leq y}} \text{input}_{n^L,t,y^V,y,m,n,c,l} \cdot \text{ACT}_{n^L,t,y^V,y,m}$ $\geq \text{demand}_{n,c,l,y} \quad \forall \ l \in L$ While `demand` must be met, supply can *exceed* demand, allowing the model to plan for meeting demand in future periods by storing storable commodities. First we establish demand. Let's assume - 40 million people in [300 AC](https://atlasoficeandfireblog.wordpress.com/2016/03/06/the-population-of-the-seven-kingdoms/) - similar population growth to Earth in the same time frame [(~factor of 12)](https://en.wikipedia.org/wiki/World_population_estimates) - a per capita demand for electricity of 1000 kWh - and 8760 hours in a year (of course!) Then we can add the demand parameter. As a present-day reference: [~72000 GWh in Austria](https://www.iea.org/statistics/?country=AUSTRIA&year=2016&category=Energy%20consumption&indicator=undefined&mode=chart&dataTable=INDICATORS) with a population of [~8.7M](http://www.austria.org/population/), which is ~8300 kWh per capita ``` demand_per_year = 40 * 12 * 1000 / 8760 light_demand = pd.DataFrame({ 'node': country, 'commodity': 'light', 'level': 'useful', 'year': model_horizon, 'time': 'year', 'value': (100 * gdp_profile).round(), 'unit': 'GWa', }) ``` `light_demand` illustrates the data format for *MESSAGEix* parameters. It is a `pandas.DataFrame` containing three types of information in a specific format: - A "value" column containing the numerical values for this parameter. - A "unit" column. - Other columns ("node", "commodity", "level", "time") that indicate the key to which each value applies. ``` light_demand # We use add_par for adding data to a MESSAGEix parameter scenario.add_par("demand", light_demand) ``` In order to define the input and output commodities of each technology, we define some common keys. - **Input** quantities require `_origin` keys that specify where the inputs are *received from*. - **Output** quantities require `_dest` keys that specify where the outputs are *transferred to*. ``` year_df = scenario.vintage_and_active_years() vintage_years, act_years = year_df['year_vtg'], year_df['year_act'] base = { 'node_loc': country, 'year_vtg': vintage_years, 'year_act': act_years, 'mode': 'standard', 'time': 'year', 'unit': '-', } base_input = make_df(base, node_origin=country, time_origin='year') base_output = make_df(base, node_dest=country, time_dest='year') ``` Working backwards along the Reference Energy System, we can add connections for the `bulb`. A light bulb… - receives *input* in the form of the "electricity" *commodity* at the "final [energy]" *level*, and - *outputs* the commodity "light" at the "useful [energy]" level. The `value` in the input and output parameters is used to represent the efficiency of a technology (efficiency = output/input). For example, an input of 1.0 and an output of 1.0 for a technology show that its efficiency is 100% in converting the input commodity to the output commodity.
``` bulb_out = make_df(base_output, technology='bulb', commodity='light', level='useful', value=1.0) scenario.add_par('output', bulb_out) bulb_in = make_df(base_input, technology='bulb', commodity='electricity', level='final', value=1.0) scenario.add_par('input', bulb_in) ``` Next, we parameterize the electrical `grid`, which… - receives electricity at the "secondary" energy level. - also outputs electricity, but at the "final" energy level (to be used by the light bulb). Because the grid has transmission losses, only 90% of the input electricity is available as output. ``` grid_efficiency = 0.9 grid_out = make_df(base_output, technology='grid', commodity='electricity', level='final', value=grid_efficiency) scenario.add_par('output', grid_out) grid_in = make_df(base_input, technology='grid', commodity='electricity', level='secondary', value=1.0) scenario.add_par('input', grid_in) ``` And finally, our power plants. The model does not include the fossil resources used as `input` for coal plants; however, costs of coal extraction are included in the parameter $variable\_cost$. ``` coal_out = make_df(base_output, technology='coal_ppl', commodity='electricity', level='secondary', value=1.) scenario.add_par('output', coal_out) wind_out = make_df(base_output, technology='wind_ppl', commodity='electricity', level='secondary', value=1.) scenario.add_par('output', wind_out) ``` ## Operational Constraints and Parameters The model has a number of "reality" constraints, which relate built *capacity* (`CAP`) to available power, or the *activity* (`ACT`) of that technology. The **capacity constraint** limits the activity of a technology to the installed capacity multiplied by a capacity factor. Capacity factor or is the fraction of installed capacity that can be active in a certain period (here the sub-annual time step *h*). $$\sum_{m} \text{ACT}_{n,t,y^V,y,m,h} \leq \text{duration_time}_{h} \cdot \text{capacity_factor}_{n,t,y^V,y,h} \cdot \text{CAP}_{n,t,y^V,y} \quad t \ \in \ T^{INV}$$ This requires us to provide the `capacity_factor` for each technology. 
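As a quick numerical illustration of what this constraint implies (using the wind value assigned just below, and noting that with the single yearly time slice used here $\text{duration\_time} = 1$): a capacity factor of 0.36 means

$$\text{ACT}_{\text{wind}} \leq 0.36 \cdot \text{CAP}_{\text{wind}},$$

i.e. 1 GW of installed wind capacity can deliver at most 0.36 GWa of activity per year, while `coal_ppl` and `bulb` (capacity factor 1) can in principle run at their full installed capacity year-round.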
Here, we call `make_df()` and `add_par()` in a loop to execute similar code for three technologies: ``` base_capacity_factor = { 'node_loc': country, 'year_vtg': vintage_years, 'year_act': act_years, 'time': 'year', 'unit': '-', } capacity_factor = { 'coal_ppl': 1, 'wind_ppl': 0.36, 'bulb': 1, } for tec, val in capacity_factor.items(): df = make_df(base_capacity_factor, technology=tec, value=val) scenario.add_par('capacity_factor', df) ``` The model can further be provided `technical_lifetime`s in order to properly manage deployed capacity and related costs via the **capacity maintenance** constraint: $\text{CAP}_{n,t,y^V,y} \leq \text{remaining_capacity}_{n,t,y^V,y} \cdot \text{value} \quad \forall \quad t \in T^{INV}$ where `value` can take different forms depending on what time period is considered: | Value | Condition | |-------------------------------------|-----------------------------------------------------| | $\Delta_y \text{historical_new_capacity}_{n,t,y^V}$ | $y$ is first model period | | $\Delta_y \text{CAP_NEW}_{n,t,y^V}$ | $y = y^V$ | | $\text{CAP}_{n,t,y^V,y-1}$ | $0 < y - y^V < \text{technical_lifetime}_{n,t,y^V}$ | ``` base_technical_lifetime = { 'node_loc': country, 'year_vtg': model_horizon, 'unit': 'y', } lifetime = { 'coal_ppl': 20, 'wind_ppl': 20, 'bulb': 1, } for tec, val in lifetime.items(): df = make_df(base_technical_lifetime, technology=tec, value=val) scenario.add_par('technical_lifetime', df) ``` ## Technological Diffusion and Contraction We know from historical precedent that energy systems can not be transformed instantaneously. Therefore, we use a family of dynamic constraints on activity and capacity. These constraints define the upper and lower limit of the domain of activity and capacity over time based on their value in the previous time step, an initial value, and growth/decline rates. $\sum_{y^V \leq y,m} \text{ACT}_{n,t,y^V,y,m,h} \leq$ $\text{initial_activity_up}_{n,t,y,h} \cdot \frac{ \Big( 1 + growth\_activity\_up_{n,t,y,h} \Big)^{|y|} - 1 } { growth\_activity\_up_{n,t,y,h} }+ \Big( 1 + growth\_activity\_up_{n,t,y,h} \Big)^{|y|} \cdot \Big( \sum_{y^V \leq y-1,m} ACT_{n,t,y^V,y-1,m,h} + \sum_{m} historical\_activity_{n,t,y-1,m,h}\Big)$ This example limits the ability for technologies to **grow**. To do so, we need to provide `growth_activity_up` values for each technology that we want to model as being diffusion constrained. Here, we set this constraint at 10% per year. ``` base_growth = { 'node_loc': country, 'year_act': model_horizon, 'time': 'year', 'unit': '-', } growth_technologies = [ "coal_ppl", "wind_ppl", ] for tec in growth_technologies: df = make_df(base_growth, technology=tec, value=0.1) scenario.add_par('growth_activity_up', df) ``` ## Defining an Energy Mix (Model Calibration) To model the transition of an energy system, one must start with the existing system which are defined by the parameters `historical_activity` and `historical_new_capacity`. These parameters define the energy mix before the model horizon. 
We begin by defining a few key values: - how much useful energy was needed - how much final energy was generated - and the mix for different technologies ``` historic_demand = 0.85 * demand_per_year historic_generation = historic_demand / grid_efficiency coal_fraction = 0.6 base_capacity = { 'node_loc': country, 'year_vtg': history, 'unit': 'GWa', } base_activity = { 'node_loc': country, 'year_act': history, 'mode': 'standard', 'time': 'year', 'unit': 'GWa', } ``` Then, we can define the **activity** and **capacity** in the historic period ``` old_activity = { 'coal_ppl': coal_fraction * historic_generation, 'wind_ppl': (1 - coal_fraction) * historic_generation, } for tec, val in old_activity.items(): df = make_df(base_activity, technology=tec, value=val) scenario.add_par('historical_activity', df) act_to_cap = { 'coal_ppl': 1 / 10 / capacity_factor['coal_ppl'] / 2, # 20 year lifetime 'wind_ppl': 1 / 10 / capacity_factor['wind_ppl'] / 2, } for tec in act_to_cap: value = old_activity[tec] * act_to_cap[tec] df = make_df(base_capacity, technology=tec, value=value) scenario.add_par('historical_new_capacity', df) ``` ## Objective Function The objective function drives the purpose of the optimization. Do we wish to seek maximum utility of the social planner, minimize carbon emissions, or something else? Classical IAMs seek to minimize total discounted system cost over space and time. $$\min \sum_{n,y \in Y^{M}} \text{interestrate}_{y} \cdot \text{COST_NODAL}_{n,y}$$ First, let's add the interest rate parameter. ``` scenario.add_par("interestrate", model_horizon, value=0.05, unit='-') ``` `COST_NODAL` is comprised of a variety of costs related to the use of different technologies. ### Investment Costs Capital, or investment, costs are invoked whenever a new plant or unit is built $$\text{inv_cost}_{n,t,y} \cdot \text{construction_time_factor}_{n,t,y} \cdot \text{CAP_NEW}_{n,t,y}$$ ``` base_inv_cost = { 'node_loc': country, 'year_vtg': model_horizon, 'unit': 'USD/kW', } # Adding a new unit to the library mp.add_unit('USD/kW') # in $ / kW (specific investment cost) costs = { 'coal_ppl': 500, 'wind_ppl': 1500, 'bulb': 5, } for tec, val in costs.items(): df = make_df(base_inv_cost, technology=tec, value=val) scenario.add_par('inv_cost', df) ``` ### Fixed O&M Costs Fixed cost are only relevant as long as the capacity is active. This formulation allows to include the potential cost savings from early retirement of installed capacity. $$\sum_{y^V \leq y} \text{fix_cost}_{n,t,y^V,y} \cdot \text{CAP}_{n,t,y^V,y}$$ ``` base_fix_cost = { 'node_loc': country, 'year_vtg': vintage_years, 'year_act': act_years, 'unit': 'USD/kWa', } # in $ / kW / year (every year a fixed quantity is destinated to cover part of the O&M costs # based on the size of the plant, e.g. lightning, labor, scheduled maintenance, etc.) costs = { 'coal_ppl': 30, 'wind_ppl': 10, } for tec, val in costs.items(): df = make_df(base_fix_cost, technology=tec, value=val) scenario.add_par('fix_cost', df) ``` ### Variable O&M Costs Variable Operation and Maintence costs are associated with the costs of actively running the plant. Thus, they are not applied if a plant is on standby (i.e., constructed, but not currently in use). 
$$\sum_{\substack{y^V \leq y \\ m,h}} \text{var_cost}_{n,t,y^V,y,m,h} \cdot \text{ACT}_{n,t,y^V,y,m,h} $$ ``` base_var_cost = { 'node_loc': country, 'year_vtg': vintage_years, 'year_act': act_years, 'mode': 'standard', 'time': 'year', 'unit': 'USD/kWa', } # in $ / kWa (costs associatied to the degradation of equipment when the plant is functioning # per unit of energy produced kW·year = 8760 kWh. # Therefore this costs represents USD per 8760 kWh of energy). Do not confuse with fixed O&M units. costs = { 'coal_ppl': 30, 'grid': 50, } for tec, val in costs.items(): df = make_df(base_var_cost, technology=tec, value=val) scenario.add_par('var_cost', df) ``` A full model will also have costs associated with - costs associated with technologies (investment, fixed, variable costs) - resource extraction: $\sum_{c,g} \ resource\_cost_{n,c,g,y} \cdot EXT_{n,c,g,y} $ - emissions - land use (emulator): $\sum_{s} land\_cost_{n,s,y} \cdot LAND_{n,s,y}$ ## Time to Solve the Model First, we *commit* the model structure and input data (sets and parameters). In the `ixmp` backend, this creates a new model version in the database, which is assigned a version number automatically: ``` from message_ix import log log.info('version number prior to commit: {}'.format(scenario.version)) scenario.commit(comment='basic model of Westeros electrification') log.info('version number prior committing to the database: {}'.format(scenario.version)) ``` An `ixmp` database can contain many scenarios, and possibly multiple versions of the same model and scenario name. These are distinguished by unique version numbers. To make it easier to retrieve the "correct" version (e.g., the latest one), you can set a specific scenario as the default version to use if the "Westeros Electrified" model is loaded from the `ixmp` database. ``` scenario.set_as_default() scenario.solve() scenario.var('OBJ')['lvl'] ``` ## Plotting Results We make use of some custom code for plotting the results; see `tools.py` in the tutorial directory. ``` from tools import Plots p = Plots(scenario, country, firstyear=model_horizon[0]) ``` ### Activity How much energy is generated in each time period from the different potential sources? ``` p.plot_activity(baseyear=True, subset=['coal_ppl', 'wind_ppl']) ``` ### Capacity How much capacity of each plant is installed in each period? ``` p.plot_capacity(baseyear=True, subset=['coal_ppl', 'wind_ppl']) ``` ### Electricity Price And how much does the electricity cost? These prices are in fact **shadow prices** taken from the **dual variables** of the model solution. They reflect the marginal cost of electricity generation (i.e., the additional cost of the system for supplying one more unit of electricity), which is in fact the marginal cost of the most expensive operating generator. Note the price drop when the most expensive technology is no longer in the system. ``` p.plot_prices(subset=['light'], baseyear=True) ``` ## Close the connection to the database When working with local HSQLDB database instances, you cannot connect to one database from multipe Jupyter notebooks (or processes) at the same time. If you want to easily switch between notebooks with connections to the same `ixmp` database, you need to close the connection in one notebook before initializing the platform using `ixmp.Platform()` in another notebook. After having closed the database connection, you can reopen it using ``` mp.open_db() ``` ``` mp.close_db() ``` ## Congratulations! You have built and run your very first *MESSAGEix* model. 
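As an optional last step (a minimal sketch using the same `scenario.var()` accessor already used for `OBJ` above; reopen the database with `mp.open_db()` first if you have already closed the connection), you can pull the raw solution tables for any decision variable:

```
# Optimization results as pandas DataFrames; columns follow each variable's
# index sets, plus 'lvl' (level) and 'mrg' (marginal) for every entry.
act = scenario.var('ACT')          # activity of each technology
cap_new = scenario.var('CAP_NEW')  # newly installed capacity per period
act.head()
```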
Welcome to the community! The next tutorials will introduce you to other features of the framework, including energy system constraints, emissions taxes, and other policy options. Check us out on GitHub at https://github.com/iiasa/message_ix and get in touch with us online at https://groups.google.com/forum/message-ix.
## Set Up Today you will create partial dependence plots and practice building insights with data from the [Taxi Fare Prediction](https://www.kaggle.com/c/new-york-city-taxi-fare-prediction) competition. We have again provided code to do the basic loading, review and model-building. Run the cell below to set everything up: ``` import pandas as pd from sklearn.ensemble import RandomForestRegressor from sklearn.linear_model import LinearRegression from sklearn.model_selection import train_test_split # Environment Set-Up for feedback system. from learntools.core import binder binder.bind(globals()) from learntools.ml_explainability.ex3 import * print("Setup Complete") # Data manipulation code below here data = pd.read_csv('../input/new-york-city-taxi-fare-prediction/train.csv', nrows=50000) # Remove data with extreme outlier coordinates or negative fares data = data.query('pickup_latitude > 40.7 and pickup_latitude < 40.8 and ' + 'dropoff_latitude > 40.7 and dropoff_latitude < 40.8 and ' + 'pickup_longitude > -74 and pickup_longitude < -73.9 and ' + 'dropoff_longitude > -74 and dropoff_longitude < -73.9 and ' + 'fare_amount > 0' ) y = data.fare_amount base_features = ['pickup_longitude', 'pickup_latitude', 'dropoff_longitude', 'dropoff_latitude'] X = data[base_features] train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1) first_model = RandomForestRegressor(n_estimators=30, random_state=1).fit(train_X, train_y) print("Data sample:") data.head() data.describe() ``` ## Question 1 Here is the code to plot the partial dependence plot for pickup_longitude. Run the following cell. ``` from matplotlib import pyplot as plt from pdpbox import pdp, get_dataset, info_plots feat_name = 'pickup_longitude' pdp_dist = pdp.pdp_isolate(model=first_model, dataset=val_X, model_features=base_features, feature=feat_name) pdp.pdp_plot(pdp_dist, feat_name) plt.show() ``` Why does the partial dependence plot have this U-shape? Does your explanation suggest what shape to expect in the partial dependence plots for the other features? Create all other partial plots in a for-loop below (copying the appropriate lines from the code above). ``` for feat_name in base_features: pdp_dist = _ _ plt.show() ``` Do the shapes match your expectations for what shapes they would have? Can you explain the shape now that you've seen them? Uncomment the following line to check your intuition. ``` # q_1.solution() ``` ## Q2 Now you will run a 2D partial dependence plot. As a reminder, here is the code from the tutorial. ``` inter1 = pdp.pdp_interact(model=my_model, dataset=val_X, model_features=feature_names, features=['Goal Scored', 'Distance Covered (Kms)']) pdp.pdp_interact_plot(pdp_interact_out=inter1, feature_names=['Goal Scored', 'Distance Covered (Kms)'], plot_type='contour') plt.show() ``` Create a 2D plot for the features `pickup_longitude` and `dropoff_longitude`. Plot it appropriately? What do you expect it to look like? ``` # Add your code here ``` Uncomment the line below to see the solution and explanation for how one might reason about the plot shape. ``` # q_2.solution() ``` ## Question 3 Consider a ride starting at longitude -73.92 and ending at longitude -74. Using the graph from the last question, estimate how much money the rider would have saved if they'd started the ride at longitude -73.98 instead? ``` savings_from_shorter_trip = _ q_3.check() ``` For a solution or hint, uncomment the appropriate line below. 
``` # q_3.hint() # q_3.solution() ``` ## Question 4 In the PDP's you've seen so far, location features have primarily served as a proxy to capture distance traveled. In the permutation importance lessons, you added the features `abs_lon_change` and `abs_lat_change` as a more direct measure of distance. Create these features again here. You only need to fill in the top two lines. Then run the following cell. **After you run it, identify the most important difference between this partial dependence plot and the one you got without absolute value features. The code to generate the PDP without absolute value features is at the top of this code cell.** --- ``` # This is the PDP for pickup_longitude without the absolute difference features. Included here to help compare it to the new PDP you create feat_name = 'pickup_longitude' pdp_dist_original = pdp.pdp_isolate(model=first_model, dataset=val_X, model_features=base_features, feature=feat_name) pdp.pdp_plot(pdp_dist_original, feat_name) plt.show() # create new features data['abs_lon_change'] = _ data['abs_lat_change'] = _ features_2 = ['pickup_longitude', 'pickup_latitude', 'dropoff_longitude', 'dropoff_latitude', 'abs_lat_change', 'abs_lon_change'] X = data[features_2] new_train_X, new_val_X, new_train_y, new_val_y = train_test_split(X, y, random_state=1) second_model = RandomForestRegressor(n_estimators=30, random_state=1).fit(new_train_X, new_train_y) feat_name = 'pickup_longitude' pdp_dist = pdp.pdp_isolate(model=second_model, dataset=new_val_X, model_features=features_2, feature=feat_name) pdp.pdp_plot(pdp_dist, feat_name) plt.show() q_4.check() ``` Uncomment the lines below to see a hint or the solution (including an explanation of the important differences between the plots). ``` # q_4.hint() # q_4.solution() ``` ## Question 5 Consider a scenario where you have only 2 predictive features, which we will call `feat_A` and `feat_B`. Both features have minimum values of -1 and maximum values of 1. The partial dependence plot for `feat_A` increases steeply over its whole range, whereas the partial dependence plot for feature B increases at a slower rate (less steeply) over its whole range. Does this guarantee that `feat_A` will have a higher permutation importance than `feat_B`. Why or why not? After you've thought about it, uncomment the line below for the solution. ``` # q_5.solution() ``` ## Q6 The code cell below does the following: 1. Creates two features, `X1` and `X2`, having random values in the range [-2, 2]. 2. Creates a target variable `y`, which is always 1. 3. Trains a `RandomForestRegressor` model to predict `y` given `X1` and `X2`. 4. Creates a PDP plot for `X1` and a scatter plot of `X1` vs. `y`. Do you have a prediction about what the PDP plot will look like? Run the cell to find out. Modify the initialization of `y` so that our PDP plot has a positive slope in the range [-1,1], and a negative slope everywhere else. (Note: *you should only modify the creation of `y`, leaving `X1`, `X2`, and `my_model` unchanged.*) ``` import numpy as np from numpy.random import rand n_samples = 20000 # Create array holding predictive feature X1 = 4 * rand(n_samples) - 2 X2 = 4 * rand(n_samples) - 2 # Create y. 
you should have X1 and X2 in the expression for y y = np.ones(n_samples) # create dataframe because pdp_isolate expects a dataFrame as an argument my_df = pd.DataFrame({'X1': X1, 'X2': X2, 'y': y}) predictors_df = my_df.drop(['y'], axis=1) my_model = RandomForestRegressor(n_estimators=30, random_state=1).fit(predictors_df, my_df.y) pdp_dist = pdp.pdp_isolate(model=my_model, dataset=my_df, model_features=['X1', 'X2'], feature='X1') # visualize your results pdp.pdp_plot(pdp_dist, 'X1') plt.show() q_6.check() ``` Uncomment the lines below for a hint or solution ``` # q_6.hint() # q_6.solution() ``` ## Question 7 Create a dataset with 2 features and a target, such that the pdp of the first feature is flat, but its permutation importance is high. We will use a RandomForest for the model. *Note: You only need to supply the lines that create the variables X1, X2 and y. The code to build the model and calculate insights is provided*. ``` import eli5 from eli5.sklearn import PermutationImportance n_samples = 20000 # Create array holding predictive feature X1 = _ X2 = _ # Create y. you should have X1 and X2 in the expression for y y = _ # create dataframe because pdp_isolate expects a dataFrame as an argument my_df = pd.DataFrame({'X1': X1, 'X2': X2, 'y': y}) predictors_df = my_df.drop(['y'], axis=1) my_model = RandomForestRegressor(n_estimators=30, random_state=1).fit(predictors_df, my_df.y) pdp_dist = pdp.pdp_isolate(model=my_model, dataset=my_df, model_features=['X1', 'X2'], feature='X1') pdp.pdp_plot(pdp_dist, 'X1') plt.show() perm = PermutationImportance(my_model).fit(predictors_df, my_df.y) q_7.check() # show the weights for the permutation importance you just calculated eli5.show_weights(perm, feature_names = ['X1', 'X2']) # Uncomment the following lines for the hint or solution # q_7.hint() # q_7.solution() ``` ## Keep Going Partial dependence plots can be really interesting. We have a [discussion thread](https://www.kaggle.com/learn-forum/65782) to talk about what real-world topics or questions you'd be curious to see addressed with partial dependence plots. Next, learn how **[SHAP values](#$NEXT_NOTEBOOK_URL$)** help you understand the logic for each individual prediction.
``` Question 1 Create a function that takes an integer and returns a list from 1 to the given number, where: 1. If the number can be divided evenly by 4, amplify it by 10 (i.e. return 10 times the number). 2. If the number cannot be divided evenly by 4, simply return the number. Examples amplify(4) ➞ [1, 2, 3, 40] amplify(3) ➞ [1, 2, 3] amplify(25) ➞ [1, 2, 3, 40, 5, 6, 7, 80, 9, 10, 11, 120, 13, 14, 15, 160, 17, 18, 19, 200, 21, 22, 23, 240, 25] Notes • The given integer will always be equal to or greater than 1. • Include the number (see example above). • To perform this problem with its intended purpose, try doing it with list comprehensions. If that's too difficult, just solve the challenge any way you can. def amplify(n): return [i*10 if i%4==0 else i for i in range(1, n+1)] amplify(4) amplify(3) amplify(25) Question 2 Create a function that takes a list of numbers and return the number that's unique. Examples unique([3, 3, 3, 7, 3, 3]) ➞ 7 unique([0, 0, 0.77, 0, 0]) ➞ 0.77 unique([0, 1, 1, 1, 1, 1, 1, 1]) ➞ 0 Notes Test cases will always have exactly one unique number while all others are the same. def unique(l): for i in list(set(l)): if l.count(i)==1: return i unique([3, 3, 3, 7, 3, 3]) unique([0, 0, 0.77, 0, 0]) unique([0, 1, 1, 1, 1, 1, 1, 1]) Question 3 Your task is to create a Circle constructor that creates a circle with a radius provided by an argument. The circles constructed must have two getters getArea() (PIr^2) and getPerimeter() (2PI*r) which give both respective areas and perimeter (circumference). For help with this class, I have provided you with a Rectangle constructor which you can use as a base example. Examples circy = Circle(11) circy.getArea() # Should return 380.132711084365 circy = Circle(4.44) circy.getPerimeter() # Should return 27.897342763877365 Notes Round results up to the nearest integer. class Circle: def __init__(self,r): self.radius = r def getArea(self): return round(3.14*self.radius*self.radius) def getPerimeter(self): return round(2*3.14*self.radius) circy = Circle(11) circy.getArea() circy = Circle(4.44) circy.getPerimeter() Question 4 Create a function that takes a list of strings and return a list, sorted from shortest to longest. Examples sort_by_length(["Google", "Apple", "Microsoft"]) ➞ ["Apple", "Google", "Microsoft"] sort_by_length(["Leonardo", "Michelangelo", "Raphael", "Donatello"]) ➞ ["Raphael", "Leonardo", "Donatello", "Michelangelo"] sort_by_length(["Turing", "Einstein", "Jung"]) ➞ ["Jung", "Turing", "Einstein"] Notes All test cases contain lists with strings of different lengths, so you won't have to deal with multiple strings of the same length. def sort_by_length(l): return (sorted(l, key = len)) sort_by_length(["Google", "Apple", "Microsoft"]) sort_by_length(["Leonardo", "Michelangelo", "Raphael", "Donatello"]) sort_by_length(["Turing", "Einstein", "Jung"]) Question 5 Create a function that validates whether three given integers form a Pythagorean triplet. The sum of the squares of the two smallest integers must equal the square of the largest number to be validated. Examples is_triplet(3, 4, 5) ➞ True # 3² + 4² = 25 # 5² = 25 is_triplet(13, 5, 12) ➞ True # 5² + 12² = 169 # 13² = 169 is_triplet(1, 2, 3) ➞ False # 1² + 2² = 5 # 3² = 9 Notes Numbers may not be given in a sorted order. def is_triplet(*args): l = [] l.extend((args)) l = sorted(l) if l[0]**2 + l[1]**2 == l[2]**2: return True else: return False is_triplet(3, 4, 5) is_triplet(13, 5, 12) is_triplet(1, 2, 3) ```
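One optional refinement to the Question 3 solution above (a sketch, not part of the original exercise): using `math.pi` instead of the 3.14 approximation reproduces the expected example values exactly before rounding.

```
import math

class Circle:
    def __init__(self, r):
        self.radius = r

    def getArea(self):
        # pi * r^2, rounded to the nearest integer as the notes require
        return round(math.pi * self.radius ** 2)

    def getPerimeter(self):
        # 2 * pi * r, rounded to the nearest integer
        return round(2 * math.pi * self.radius)

circy = Circle(11)
circy.getArea()        # 380 (from 380.132711084365)
circy = Circle(4.44)
circy.getPerimeter()   # 28 (from 27.897342763877365)
```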
``` import numpy as np import pandas as pd from matplotlib import pyplot as plt from tqdm import tqdm as tqdm %matplotlib inline import torch import torchvision import torchvision.transforms as transforms import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import random # from google.colab import drive # drive.mount('/content/drive') transform = transforms.Compose( [transforms.CenterCrop((28,28)),transforms.ToTensor(),transforms.Normalize([0.5], [0.5])]) mnist_trainset = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform) mnist_testset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform) index1 = [np.where(mnist_trainset.targets==0)[0] , np.where(mnist_trainset.targets==1)[0] ] index1 = np.concatenate(index1,axis=0) len(index1) #12665 true = 1000 total = 47000 sin = total-true sin epochs = 300 indices = np.random.choice(index1,true) indices.shape index = np.where(np.logical_and(mnist_trainset.targets!=0,mnist_trainset.targets!=1))[0] #47335 index.shape req_index = np.random.choice(index.shape[0], sin, replace=False) # req_index index = index[req_index] index.shape values = np.random.choice([0,1],size= sin) print(sum(values ==0),sum(values==1), sum(values ==0) + sum(values==1) ) mnist_trainset.data = torch.cat((mnist_trainset.data[indices],mnist_trainset.data[index])) mnist_trainset.targets = torch.cat((mnist_trainset.targets[indices],torch.Tensor(values).type(torch.LongTensor))) mnist_trainset.targets.shape, mnist_trainset.data.shape # mnist_trainset.targets[index] = torch.Tensor(values).type(torch.LongTensor) j =20078 # Without Shuffle upto True Training numbers correct , after that corrupted print(plt.imshow(mnist_trainset.data[j]),mnist_trainset.targets[j]) trainloader = torch.utils.data.DataLoader(mnist_trainset, batch_size=250,shuffle=True, num_workers=2) testloader = torch.utils.data.DataLoader(mnist_testset, batch_size=250,shuffle=False, num_workers=2) mnist_trainset.data.shape classes = ('zero', 'one') dataiter = iter(trainloader) images, labels = dataiter.next() images[:4].shape def imshow(img): img = img / 2 + 0.5 # unnormalize npimg = img.numpy() plt.imshow(np.transpose(npimg, (1, 2, 0))) plt.show() imshow(torchvision.utils.make_grid(images[:10])) print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(10))) class Module2(nn.Module): def __init__(self): super(Module2, self).__init__() self.conv1 = nn.Conv2d(1, 6, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, 16, 5) self.fc1 = nn.Linear(16 * 4 * 4, 128) self.fc2 = nn.Linear(128, 64) self.fc3 = nn.Linear(64, 10) self.fc4 = nn.Linear(10,2) def forward(self,z): y1 = self.pool(F.relu(self.conv1(z))) y1 = self.pool(F.relu(self.conv2(y1))) # print(y1.shape) y1 = y1.view(-1, 16 * 4 * 4) y1 = F.relu(self.fc1(y1)) y1 = F.relu(self.fc2(y1)) y1 = F.relu(self.fc3(y1)) y1 = self.fc4(y1) return y1 inc = Module2() inc = inc.to("cuda") criterion_inception = nn.CrossEntropyLoss() optimizer_inception = optim.SGD(inc.parameters(), lr=0.01, momentum=0.9) acti = [] loss_curi = [] for epoch in range(epochs): # loop over the dataset multiple times ep_lossi = [] running_loss = 0.0 for i, data in enumerate(trainloader, 0): # get the inputs inputs, labels = data inputs, labels = inputs.to("cuda"),labels.to("cuda") # print(inputs.shape) # zero the parameter gradients optimizer_inception.zero_grad() # forward + backward + optimize outputs = inc(inputs) loss = criterion_inception(outputs, labels) loss.backward() 
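        # loss.backward() accumulates gradients for this mini-batch; the optimizer
        # step on the next line then updates the CNN weights (SGD, lr=0.01, momentum=0.9).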
optimizer_inception.step() # print statistics running_loss += loss.item() if i % 50 == 49: # print every 50 mini-batches print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 50)) ep_lossi.append(running_loss/50) # loss per minibatch running_loss = 0.0 loss_curi.append(np.mean(ep_lossi)) #loss per epoch if (np.mean(ep_lossi)<=0.03): break # acti.append(actis) print('Finished Training') correct = 0 total = 0 with torch.no_grad(): for data in trainloader: images, labels = data images, labels = images.to("cuda"), labels.to("cuda") outputs = inc(images) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print('Accuracy of the network on the 60000 train images: %d %%' % ( 100 * correct / total)) total,correct correct = 0 total = 0 out = [] pred = [] with torch.no_grad(): for data in testloader: images, labels = data images, labels = images.to("cuda"),labels.to("cuda") out.append(labels.cpu().numpy()) outputs= inc(images) _, predicted = torch.max(outputs.data, 1) pred.append(predicted.cpu().numpy()) total += labels.size(0) correct += (predicted == labels).sum().item() print('Accuracy of the network on the 10000 test images: %d %%' % ( 100 * correct / total)) out = np.concatenate(out,axis=0) pred = np.concatenate(pred,axis=0) index = np.logical_or(out ==1,out==0) print(index.shape) acc = sum(out[index] == pred[index])/sum(index) print('Accuracy of the network on the 10000 test images: %d %%' % ( 100*acc)) sum(index) import random random.sample([1,2,3,4,5,6,7,8],5) # torch.save(inc.state_dict(),"/content/drive/My Drive/model_simple_8000.pkl") fig = plt.figure() plt.plot(loss_curi,label="loss_Curve") plt.xlabel("epochs") plt.ylabel("training_loss") plt.legend() fig.savefig("loss_curve.pdf") ``` Simple Model 3 Inception Module |true training data | Corr Training Data | Test Accuracy | Test Accuracy 0-1 | | ------------------ | ------------------ | ------------- | ----------------- | | 100 | 47335 | 15 | 75 | | 500 | 47335 | 16 | 80 | | 1000 | 47335 | 17 | 83 | | 2000 | 47335 | 19 | 92 | | 4000 | 47335 | 20 | 95 | | 6000 | 47335 | 20 | 96 | | 8000 | 47335 | 20 | 96 | | 12665 | 47335 | 20 | 98 | | Total Training Data | Training Accuracy | |---------------------------- | ------------------------ | | 47435 | 100 | | 47835 | 100 | | 48335 | 100 | | 49335 | 100 | | 51335 | 100 | | 53335 | 100 | | 55335 | 100 | | 60000 | 100 | Mini- Inception network 8 Inception Modules |true training data | Corr Training Data | Test Accuracy | Test Accuracy 0-1 | | ------------------ | ------------------ | ------------- | ----------------- | | 100 | 47335 | 14 | 69 | | 500 | 47335 | 19 | 90 | | 1000 | 47335 | 19 | 92 | | 2000 | 47335 | 20 | 95 | | 4000 | 47335 | 20 | 97 | | 6000 | 47335 | 20 | 97 | | 8000 | 47335 | 20 | 98 | | 12665 | 47335 | 20 | 99 | | Total Training Data | Training Accuracy | |---------------------------- | ------------------------ | | 47435 | 100 | | 47835 | 100 | | 48335 | 100 | | 49335 | 100 | | 51335 | 100 | | 53335 | 100 | | 55335 | 100 | | 60000 | 100 | ``` ```
# Statistical analysis on NEMSIS ## BMI 6106 - Final Project #### Project by: Anwar Alsanea <br> Luz Gabriela Iorg <br> Jorge Rojas <br> ## Abstract <br> The National Emergency Medical Services Information System (NEMSIS) is a national database that contains Emergency Medical Services (EMS) data collected for the United States. In this project, we are adressing multiple questions to determine trends and major factors in EMS patient care. Objectives included predicting gender and age based on other factors such as incident location and type of injury. In order to approach our objectives, statistical analysis were applied using the program R. Analysis included linear regressions and principle component analysis. Our results show no significance when it comes to predicting gender based on factors. As for age, some factors were significant in prediciting the patient's age. Component analysis show low variane across all factors included in this data. We conclude that more data points with more numerical variable should be included and analyzed to provide better EMS patient care. Further analysis is needed to conclude the best approach to better determine EMS patient care with more data. ## Introduction The National Emergency Medical Services Information System (NEMSIS) is a national database that contains Emergency Medical Services (EMS) data collected for the United States. The data holds information on patient care from 9-1-1 calls. The goal of NEMSIS in collecting this database across the states to evaluate and analyze EMS needs and improve the performance of patient care (NEMSIS, 2017). The dataset is collected for all states, however the data does not specify which state it was collected. We are unable to compare between states. In this project, we are adressing multiple questions to determine trends and major factors in EMS patient care. Our first objective was to examine how the parameters related to gender. Which of the parameters or factors had the highest effect on gender? Did gender play an important role to help in determining better EMS patient care? Our second objective was to examine the patient's age to the factors. Can we use age to assist in determinig the best approach to EMS patient care? Fianlly, we analyzed the data as a whole, and determined if any factors of paramteres should be highly considered when developing new EMA patient care procedures. ## Methods In order to approach each of our objectives, statistical analysis were applied to determine each objective. >### Data: Data was obtained from US Department of Transportation National Highway Traffic Safety Administration (NHTSA) (NHSTA, 2006). The data was first imported to Microsoft SQL Server to clean up the delimeters. Other changes included changing all column names to be able to use them in R easily. The original dataset contains 29 million events, in which were narrowed down to 10,000 events for simplification. >The data was saved as a csv file that is included with this report. >The original data set contains 44 National elements (i.e.parameters, factors). 
However in this analysis we were interested in analysing and examining 8 of the elements that included: >- Age >- Gender >- Primary method of payment >- Incident location type >- Primary symptom >- Cause of injury >- Incident patient disposition >- Complaint reported by dispatch >Each parameter is explained below: ><font color = blue> *Age* (age.in.years) (numeric) </font> This column was calculated from Date of Birth provided in the original dataset, this conversion was performed in SQL. ><font color = blue>*Gender* (categorical)</font> The original data provided gender in terms values that represented each gender (655 for female and 650 for male). These values were taken in R and converted to strings of "male and "female" ><font color = blue>*primary.method.of.payment* (categorical)</font> values and their representations are as follows: >- 720 Insurance >- 725 Medicaid >- 730 Medicare >- 735 Not billed >- 740 Other government >- 745 Self pay >- 750 Workes compensaiont ><font color = blue>*incident.location.type* (categorical)</font> values and their representations are as follows: >- 1135 Home or residence >- 1140 Farm >- 1145 Mine or quarry >- 1150 Industrial place >- 1155 Recreation or sport place >- 1160 street or highway >- 1165 public building >- 1170 business/resturaunts >- 1175 Health care facility (hospital, clinic, nursing homes) >- 1180 Nursing homes or jail >- 1185 Lake, river, ocean >- 1190 all other locations ><font color = blue>*primary.symptom* (categorical)</font> values and their representations are as follows: >- 1405 Bleeding >- 1410 Breathing Problem >- 1415 Change in responsiveness >- 1425 Death >- 1420 Choking >- 1430 Device/Equipment Problem >- 1435 Diarrhea >- 1440 Drainage/Discharge >- 1445 Fever >- 1450 Malaise >- 1455 Mass/Lesion >- 1460 Mental/Psych >- 1465 Nausea/Vomiting >- 1470 None >- 1475 Pain >- 1480 Palpitations >- 1485 Rash/Itching >- 1490 Swelling >- 1495 Transport Only >- 1505 Wound >- 1500 Weakness ><font color = blue>*cause.of.injury* (categorical)</font> values and their representations are as follows: >- 1885 Bites (E906.0) >- 9505 Bicycle Accident (E826.0) >- 9520 Child battering (E967.0) >- 9530 Drug poisoning (E85X.0) >- 9540 Excessive Cold (E901.0) >- 9550 Falls (E88X.0) >- 9560 Firearm assault (E965.0) >- 9570 Firearm self inflicted (E955.0) >- 9580 Machinery accidents (E919.0) >- 9590 Motor Vehicle non-traffic accident (E82X.0) >- 9600 Motorcycle Accident (E81X.1) >- 9610 Pedestrian traffic accident (E814.0) >- 9620 Rape (E960.1) >- 9630 Stabbing/Cutting Accidental (E986.0) >- 9640 Struck by Blunt/Thrown Object (E968.2) >- 9650 Water Transport accident (E83X.0) >- 9500 Aircraft related accident (E84X.0) >- 9515 Chemical poisoning (E86X.0) >- 9525 Drowning (E910.0) >- 9535 Electrocution (non-lightning) (E925.0) >- 9545 Excessive Heat (E900.0) >- 9555 Fire and Flames (E89X.0) >- 9565 Firearm injury (accidental) (E985.0) >- 9575 Lightning (E907.0) >- 9585 Mechanical Suffocation (E913.0) >- 9595 Motor Vehicle traffic accident (E81X.0) >- 9605 Non-Motorized Vehicle Accident (E848.0) >- 9615 Radiation exposure (E926.0) >- 9625 Smoke Inhalation (E89X.2) >- 9635 Stabbing/Cutting Assault >- 9645 Venomous stings (plants, animals) (E905.0) ><font color = blue>*incident.patient.disposition* (categorical)</font> values and their representations are as follows: >- 4815 Cancelled >- 4825 No Patient Found >- 4835 Patient Refused Care >- 4845 Treated, Transferred Care >- 4855 Treated, Transported by Law Enforcement >- 4820 Dead at Scene >- 4830 No 
Treatment Required >- 4840 Treated and Released >- 4850 Treated, Transported by EMS >- 4860 Treated, Transported by Private Vehicle ><font color = blue>*complaint.reported.by.dispatch* (categorical)</font> values and their representations are as follows: >- 400 Abdominal Pain >- 410 Animal Bite >- 420 Back Pain >- 430 Burns >- 440 Cardiac Arrest >- 450 Choking >- 460 Diabetic Problem >- 470 Electrocution >- 480 Fall Victim >- 490 Heart Problems >- 500 Hemorrhage/Laceration 510 Ingestion/Poisoning >- 520 Psychiatric Problem >- 530 Stab/Gunshot Wound >- 540 Traffic Accident >- 550 Unconscious/Fainting >- 560 Transfer/Interfacility/Palliative Care >- 405 Allergies >- 415 Assault >- 425 Breathing Problem >- 435 CO Poisoning/Hazmat >- 445 Chest Pain >- 455 Convulsions/Seizure >- 465 Drowning >- 475 Eye Problem >- 485 Headache >- 495 Heat/Cold Exposure >- 505 Industrial Accident/Inaccessible Incident/Other Entrapments (non-vehicle) >- 515 Pregnancy/Childbirth >- 525 Sick Person >- 535 Stroke/CVA >- 545 Traumatic Injury >- 555 Unknown Problem Man Down 565 MCI (Mass Casualty Incident) > ### Statistical Analysis: Statistical analysis and visual representations were produced using the program R (R Development Core Team, 2008). Packages used in this report were: - FactoMineR - factoextra - corrplot - dplyr - ggplot2 - modelr - PCAmixdata >A code is included in this report to install those packages if needed. > #### tests: Linear models and regression were used to approach the first two objectives. Principle component analysis was used to analyze the data as a whole. ## Results and Discussion The goal of this study was to analyze a subset of characteristics or factors from the NEMSIS 911 call events. ### Code Outline: - Input and output data - Create vectors, handle variables, and perform other basic functions (remove NAs) - Tackle data structures such as matrices, lists, factors, and data frames - Build statistical models with linear regressions and analysis of variance - Create a variety of graphic displays - Finding clusters in data ### Packages and libraries installation: ``` install.packages(c("FactoMineR", "factoextra")) install.packages("corrplot") install.packages("PCAmixdata") library(dplyr) library(ggplot2) library(gridExtra) library("FactoMineR") library("corrplot") library("factoextra") library(modelr) library(broom) library("PCAmixdata") require(stats) #require(pls) ``` ### Data: The file to import is saved under the name: events_cleaned_v3.txt The code below is to import the data to the notebook ``` events = read.table(file = "events_cleaned_v3.txt", sep="|", header = TRUE, stringsAsFactors = F) head(events, n=7) #dim(events) ``` ### Data cleaning: - Create vectors, handle variables, and perform other basic functions (remove NAs) ``` event1 = select(events, age.in.years, gender, primary.method.of.payment, incident.location.type, primary.symptom, cause.of.injury, incident.patient.disposition, complaint.reported.by.dispatch ) event1[event1 < 0] <- NA #head(event1, n=50) event2 = na.exclude(event1) dim(event2) head(event2) ``` - Tackle data structures manipulation such as matrices, lists, factors, and data frames. 
``` str(event2) #Converting gender as factor: event2$gender <-as.factor(event2$gender) levels(event2$gender) <- c("male", "female") #Converting dataframe as factor: event2 <- data.frame(lapply(event2, as.factor)) #Converting age.in.years as numeric: event2$age.in.years <-as.numeric(event2$age.in.years) #Checking summaries summary(event2) contrasts(event2$gender) head(event2) str(event2) ``` ### Data Analysis: Build statistical models with linear regressions and analysis of variance #### Regressions: For our analysis, we are using the standard cut off at alpha = 0.05 #### Linear regression to predict gender : The first test is a generalized linear model to predict gender based on the remanining factors. The Null hypothesis is that the factors have no effect on gender. The alternative hypothesis is that there is an effect. ``` model = glm(gender ~. -gender, data= event2, family= binomial) summary(model) #Gender (outcome variable, Y) and the rest of the variables (predictors, X) #Null hypothesis (H0): the coefficients are equal to zero (i.e., no relationship between x and y) #Alternative Hypothesis (Ha): the coefficients are not equal to zero (i.e., there is some relationship between x and y) #There is not enough evidence to say that there is a relationship between gender and the predictors. #the p-values for the intercept and the predictor variable are not significant, We can NOT reject the null hypothesis. ##further Interpretation: #From the P value numbers we can say that only primary.method.of.payment745 (Self Pay) and #primary.symptom(1500 and 1505) are significantly associated with the caller’s gender. #All of the other variables do not seem to show any relationship to the caller’s gender. #The coefficient estimate of the variable primary.method.of.payment745 is b = -1.779e+00, which is negative. #This means that a if the caller (or patient) is Self Pay, then #it is associated with a decreased probability of being a female. #primary.symptom1500 (Weakness) b = 1.411e+00 which is positive. #primary.symptom1505 (Wound) b = 1.543e+00 which is positive. #This means that symptoms of weakness and wounds are #associated with a increased probability of being a female. #BUT IT IS NOT TRUE BECAUSE THERE IS NOT A SIGNIFICANT ASSOCIATION AMONG VARIABLES! #*authors notes and future research needed to prove such claims. ``` Linear regression results show that most factors had a P > 0.05, in which we have to accept the null hypothesis that the factors do not have an effect on gender and cannot predict gender. Except for primary.method.of.payment745 (which is Self Pay) and primary.symptom(1500 and 1505) are significantly associated with the caller’s gender (ie P < 0.05). All of the other variables do not show any effect on the caller’s gender ( P > 0.05). The coefficient estimate of the variable primary.method.of.payment745 is b = -1.779e+00, which is negative. This means that a if the caller (or patient) is Self Paid, then it is associated with a decreased probability of being a female. However this coeffiecient is still too low to have significance. primary.symptom1500 (Weakness) b = 1.411e+00 which is positive. primary.symptom1505 (Wound) b = 1.543e+00 which is positive. This means that symptoms of weakness and wounds are associated with a increased probability of being a female. Again, the values are too low to be significant. 
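For readers less familiar with logistic-regression output, note that these coefficients are on the log-odds scale (a standard interpretation added here for context, not part of the original analysis):

$$\log \frac{P(\text{female})}{1 - P(\text{female})} = \beta_0 + \sum_j \beta_j x_j ,$$

so the Self Pay coefficient of $b = -1.78$, for example, corresponds to an odds ratio of $e^{-1.78} \approx 0.17$: the odds of the patient being recorded as female are multiplied by roughly 0.17 when the payment method is Self Pay, holding the other predictors fixed.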
We however conclude that this information is not sufficient to assist EMS patient care procedures, further data needs to be collected in order to determine if EMS patient care can be improved by gender as a dependent variable. #### Linear regression to predict age : Our second test is to predict using age as the independent variable. The null hypothesis is that age cannot be predicted by the other variable. The alternative hypothesis is that other variables can act as independent variable that can predict age. ``` model2 = lm(age.in.years ~. -age.in.years, data= event2) summary(model2) #age.in.years (outcome variable, Y) and the rest of the variables (predictors, X) #Null hypothesis (H0): the coefficients are equal to zero (i.e., no relationship between x and y) #Alternative Hypothesis (Ha): the coefficients are not equal to zero (i.e., there is some relationship between x and y) #There is enough evidence to say that there is a weak association between age.in.years and the predictors. #the p-values for the intercept and the predictor variable are slightly significant, We can reject the null hypothesis. ``` Our results show that there are more factors having an effect on age than gender did. Primary methods 725 and 730 (medicaid and medicare) had high significance at P << 0.05, in which we reject the null hypothesis. This result is expected as medicaid is generally for younger ages while medicare is health care for ages > 65. Primary symptom 1500 (weakness) was significant towards age at P < 0.05. Weakness is a symptom that can explain more than one condition, it is however mostly used to describe symptoms experienced with older age. We suggest that further information is included in the primary symptom factor to be able to accurately examine and develop enhanced EMS patient care services. Cause of injury 9565 and 9605 (fire arm injury and non-motorized vehicle accident respectively) have shown high significance according to age at P < 0.05 in which we reject the null hypothesis. Other factors had a P value > 0.05 in which we accept the null hypothesis that they have no effect on age. ##### Regression assumptions: ``` par(mfrow = c(2, 2)) plot(model2) #### Linearity of the data (Residuals vs Fitted). #There is no pattern in the residual plot. This suggests that we can assume linear relationship #between the predictors and the outcome variables. #### Normality of residuals (Normal Q-Q plot). #All the points fall approximately along the reference line, so we can assume normality. #### Homogeneity of residuals variance (Scale-Location). #It can be seen that the variability (variances) of the residual points does not quite follows a horizontal #line with equally spread points, suggesting non-constant variances in the residuals errors #(or the presence of some heteroscedasticity). #To reduce the heteroscedasticity problem we used the log transformation of the outcome variable (age.in.years, (y)). #model3 = lm(log(age.in.years) ~. -age.in.years, data= event2) #### Independence of residuals error terms (Residuals vs Leverage). #There are not drastic outliers in our data. ``` ##### Linearity of the data (Residuals vs Fitted plot): There is no pattern in the residual plot. This suggests that we can assume linear relationship between the predictors and the outcome variables. ##### Normality of residuals (Normal Q-Q plot): All the points fall approximately along the reference line, so we can assume normality. 
##### Homogeneity of residuals variance (Scale-Location): It can be seen that the variability (variances) of the residual points does not quite follows a horizontal line with equally spread points, suggesting non-constant variances in the residuals errors (or the presence of some heteroscedasticity). To reduce the heteroscedasticity problem we used the log transformation of the outcome variable (age.in.years, (y)). Shown next. ##### Independence of residuals error terms (Residuals vs Leverage): There are not drastic outliers in our data. ##### Reducing heteroscedasticity: ``` #Transformed Regression and new plot: model3 = lm(log(age.in.years) ~. -age.in.years, data= event2) plot(model3, 3) #heteroscedasticity has been improved. ``` #### Linear regression for age after log transformation: After the noticeable reduced heteroscedasticity in the data after using the log transformation, we examine the linear model again: ``` summary(model3) #After the log transformation of age, the p-values for the intercept and the predictor variables has #become more significant, hence indicating a stronger association between age.in.years and the predictors. ###Interpretation: #From the P value numbers we can say that primary.method.of.payment(725 and 730), #incident.location.type1170, primary.symptom (1410 and 1500), cause.of.injury(9565, 9600, and 9605), #and complaint.reported.by.dispatch(485 and 520), are significantly associated with the caller’s Age. #The coefficient estimate of the variables are: #primary.method.of.payment725 (Medicaid) b = -0.183656, which is negative. #primary.method.of.payment730(Medicare) b = 0.476600 which is positive. #This means that as age increases the probability of being on Medicaid decreases; #but the probability of being on Medicare increases as age increases. #incident.location.type1170(Trade or service (business, bars, restaurants, etc)) b = 0.396803, which is positive. #This means that as age increases the probability of the incident happening at a business, bars, #restaurants, etc., increases. #primary.symptom1410 (Breathing Problem) b = -0.854654, which is negative. #This means that Breathing Problem are more prevalent among younger people (perhaps among babies). #primary.symptom1500 (Weakness) b = 0.370141 which is positive. #This means that as age increases the probability of the primary symptom being "Weakness" increases. #complaint.reported.by.dispatch485(Headache) b = -2.192445, which is negative. #complaint.reported.by.dispatch520(Psychiatric Problem) b = -1.606781, which is negative. #This means that if the complaint.reported.by.dispatch is a "Headache" or a "Psychiatric Problem", #is associated with an increased probability of being a younger person. #cause.of.injury9565 (Firearm injury) b = -2.458792, which is negative. #cause.of.injury9505 (Bicycle Accident) b = -2.166411, which is negative. ##cause.of.injury9600 (Motorcycle Accident) b = -1.344680, which is negative. #This means that accidents involving Firearms, Motorcycle, and Bicycles are more prevalent among younger people. ``` After the log transformation of age, the p-values for the intercept and the predictor variables has become more significant, hence indicating a stronger association between age.in.years and the predictors. 
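Since the outcome is now $\log(\text{age})$, the coefficients are most naturally read on a multiplicative scale (a standard reading added here for context, not stated in the original): a coefficient $b$ multiplies the expected age by roughly $e^{b}$. For instance, the Medicare coefficient of $0.4766$ reported in the summary corresponds to a factor of $e^{0.4766} \approx 1.6$, and the Medicaid coefficient of $-0.1837$ to a factor of $e^{-0.1837} \approx 0.83$.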
*Interpretation:* From the P value numbers we can say that primary method of payment(725 and 730), incident location type 1170, primary symptom (1410 and 1500), cause of injury(9565, 9600, and 9605), and complaint reported by dispatch(485 and 520), are significantly associated with the caller’s Age. ( ie P << 0.05, in which we reject the null hypothesis). *The coefficient estimate of the variables are: * primary.method.of.payment725 (Medicaid) b = -0.183656, which is negative. primary.method.of.payment730(Medicare) b = 0.476600 which is positive. This means that as age increases the probability of being on Medicaid decreases; but the probability of being on Medicare increases as age increases. Which has been shown to be true in the first model as well. incident.location.type1170(Trade or service (business, bars, restaurants, etc)) b = 0.396803, which is positive. This means that as age increases the probability of the incident happening at a business, bars, restaurants, etc., increases. primary.symptom1410 (Breathing Problem) b = -0.854654, which is negative. This means that Breathing Problem are more prevalent among younger people (perhaps among babies). primary.symptom1500 (Weakness) b = 0.370141 which is positive. This means that as age increases the probability of the primary symptom being "Weakness" increases. complaint.reported.by.dispatch485(Headache) b = -2.192445, which is negative. complaint.reported.by.dispatch520(Psychiatric Problem) b = -1.606781, which is negative. This means that if the complaint.reported.by.dispatch is a "Headache" or a "Psychiatric Problem", is associated with an increased probability of being a younger person. cause.of.injury9565 (Firearm injury) b = -2.458792, which is negative. cause.of.injury9505 (Bicycle Accident) b = -2.166411, which is negative. cause.of.injury9600 (Motorcycle Accident) b = -1.344680, which is negative. This means that accidents involving Firearms, Motorcycle, and Bicycles are more prevalent among younger people. ###### Data visulization: In order to examine those trends, the log transfomation of age was plotted against cause of injury and incident location as shown below: <img src="./graphs/Cause-combined-log-lt95.png"> <img src="./graphs/Incident-combined-log-lt95.png"> ### Component Analysis: FAMD Our data contains both quantitative (numeric) and qualitative (categorical) variables, the best tool to analyze similarity between individuals and the association between all variables is the "Factor analysis of mixed data" (FAMD), from the FactoMineR package. Quantitative and qualitative variables are normalized during the analysis in order to balance the influence of each set of variables, (FAMD does it internally). ``` res.famd <- FAMD(event2, ncp=5, graph = FALSE) summary(res.famd) #About 5% of the variation is explained by this first eigenvalue, which is the first dimension. #Based on the contribution plots, the variables plots, and the significant categories, #we selected the next varibles for our simpler model: relevant = select(event2, age.in.years, cause.of.injury, complaint.reported.by.dispatch, primary.method.of.payment, incident.location.type, ) improv_model = lm(log(age.in.years) ~. 
-age.in.years, data= relevant) #summary(improv_model) #Analysis and Comparison of models: AIC(model3, improv_model) glance(model3) %>% dplyr::select(adj.r.squared, sigma, AIC, BIC, p.value) glance(improv_model) %>% dplyr::select(adj.r.squared, sigma, AIC, BIC, p.value) #Looking at the models' summaries, we can see that model3 and improv_model have a similar adjusted R2, #but model3's is slightly higher. This means that improv_model is a little bit better at exaining the outcome (age.in.years). #The two models have exactly the same (rounded) amount of residual standard error (RSE or sigma = 0.54). #However, improv_model is more simple than model3 because it incorporates less variables. #All things equal, the simple model is always better. #The AIC and the BIC of the improv_model are lower than those of the model3 (AIC= 527.9 vs 521.8). #In model comparison strategies, the model with the lowest AIC and BIC scores is preferred. #Finally, the F-statistic P-value of improv_model is lower than the one of the model3. #This means that the improv_model is statistically more significant compared to model3. #In this way, we can conclude that improv_model is the best model and should be used for further analyses. ``` ### Visual representations for variables: ``` #Plots for the frequency of the variables' categories for (i in 2:8) { plot(event2[,i], main=colnames(event2)[i], ylab = "Count", xlab = "Categories", col="#00AFBB", las = 2) } #Some of the variable categories have a very low frequency. These variables could distort the analysis. #scree plot a = fviz_screeplot(res.famd, addlabels = TRUE, ylim = c(0, 5)) #19% of the information (variances) contained in the data are retained by the first five principal components. #The percentage value of our variables explains less than desired of the variance; #The low frequency variables could be distorting the analysis. # Plot of variables b = fviz_famd_var(res.famd, repel = TRUE) ##It can be seen that, the variables gender, age.in.years, and incident.patient.disposition are the most correlated with dimension 1. #None of the variables are strongly correlated solely to dimension 2. # Contribution to the first dimension c = fviz_contrib(res.famd, "var", axes = 1) # Contribution to the second dimension d = fviz_contrib(res.famd, "var", axes = 2) #From the plots, it can be seen that: #variables that contribute the most to the first dimension are: cause.of.injury and complaint.reported.by.dispatch. #variables that contribute the most to the second dimension are: cause.of.injury and complaint.reported.by.dispatch. grid.arrange(a, b, c, d, ncol = 2) ``` From the plots we can see that 19% of the variances contained in the data were retained by the first five principal components. The percentage value of our variables explains low variance among the factors. Variables gender, age, and incident patient disposition are strongly correlated with dimension 1. 
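Before moving on to the clustering step, one note on the model comparison above (standard definitions added here for context, not part of the original report): the information criteria used there are

$$\mathrm{AIC} = 2k - 2\ln \hat{L}, \qquad \mathrm{BIC} = k\ln(n) - 2\ln \hat{L},$$

where $k$ is the number of estimated parameters, $n$ the number of observations, and $\hat{L}$ the maximized likelihood. Lower values indicate a better trade-off between fit and complexity, which is why `improv_model` (AIC ≈ 521.8) is preferred over `model3` (AIC ≈ 527.9).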
### Hierarchical K-means clustering: ``` df= select(relevant, age.in.years) #head(df) #Hierarchical K-means clustering hk3 <-hkmeans(df, 3) hk3$centers relevant2 = relevant relevant2$k3cluster = hk3$cluster relevant2$k3cluster <-as.factor(relevant2$k3cluster) #levels(relevant2$k3cluster) levels(relevant2$k3cluster) <- c("Child", "Young-Adult", "Adult" ) #levels(relevant2$k3cluster) #head(relevant2) res.famd2 <- FAMD(relevant2, ncp = 5, graph = FALSE) fviz_pca_ind(res.famd2, geom.ind = "point", # show points only col.ind = relevant2$k3cluster, # color by groups palette = c("#00AFBB", "#E7B800", "#FC4E07"), addEllipses = TRUE, ellipse.type = "convex", #addEllipses = TRUE, # Concentration ellipses legend.title = "Age category" ) ``` The clustering shows that children and young adults have a higher distribution across the variables or factors than adults. ## Conclusions: We have concluded that there is not enough information or data collected to improve and enhance EMS patient care procedures based on gender. As for the individual's age, there is some significance that can be used to enhance EMS patient care procedures. In this analysis, most of the variables were categorical with less events for simplicity. We suggest that more data points should be analyzed to provide better EMS patient care. Also, without geographic location, it is hard to provide the best EMS patient care without knowing where the data applies to most. Further analysis is needed to conclude the best approach to better determine EMS patient care. ### References: R Development Core Team. R: A language and environment for statistical computing. R Foundation for Statistical Computing, Vienna, Austria. (2008). ISBN 3-900051-07-0, URL http://www.R-project.org. NATIONAL EMS INFORMATION SYSTEM (NEMSIS) (2018). <https://nemsis.org/what-is-nemsis/> National Highway Traffic Safety Administration (NHTSA)(2008). Uniform PreHospital EMS Dataset Version 2.2.1. <<https://nemsis.org>
github_jupyter
``` from tensorflow.python.client import device_lib device_lib.list_local_devices() import time import copy import numpy as np import os import subprocess import sys import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.optim as optim from matplotlib import pyplot as plt from torch.utils.data.sampler import SubsetRandomSampler from drive.MyDrive.cords.selectionstrategies.supervisedlearning.glisterstrategy import GLISTERStrategy as Strategy from drive.MyDrive.cords.utils.models.resnet import ResNet18 from drive.MyDrive.cords.utils.custom_dataset import load_mnist_cifar from torch.utils.data import random_split, SequentialSampler, BatchSampler, RandomSampler from torch.autograd import Variable import math import tqdm def model_eval_loss(data_loader, model, criterion): total_loss = 0 with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(data_loader): inputs, targets = inputs.to(device), targets.to(device, non_blocking=True) outputs = model(inputs) loss = criterion(outputs, targets) total_loss += loss.item() return total_loss device = "cuda" if torch.cuda.is_available() else "cpu" print("Using Device:", device) ``` #Training Arguments ``` datadir = '../../data' data_name = 'cifar10' fraction = float(0.1) num_epochs = int(300) select_every = int(20) feature = 'dss'# 70 warm_method = 0 # whether to use warmstart-onestep (1) or online (0) num_runs = 1 # number of random runs learning_rate = 0.05 ``` #Results Folder ``` all_logs_dir = './results/' + data_name +'/' + feature +'/' + str(fraction) + '/' + str(select_every) print(all_logs_dir) subprocess.run(["mkdir", "-p", all_logs_dir]) path_logfile = os.path.join(all_logs_dir, data_name + '.txt') logfile = open(path_logfile, 'w') exp_name = data_name + '_fraction:' + str(fraction) + '_epochs:' + str(num_epochs) + \ '_selEvery:' + str(select_every) + '_variant' + str(warm_method) + '_runs' + str(num_runs) print(exp_name) ``` #Loading CIFAR10 Dataset ``` print("=======================================", file=logfile) fullset, valset, testset, num_cls = load_mnist_cifar(datadir, data_name, feature) ``` #Splitting Training dataset to train and validation sets ``` validation_set_fraction = 0.1 num_fulltrn = len(fullset) num_val = int(num_fulltrn * validation_set_fraction) num_trn = num_fulltrn - num_val trainset, validset = random_split(fullset, [num_trn, num_val]) N = len(trainset) trn_batch_size = 20 ``` #Creating DataLoaders ``` trn_batch_size = 20 val_batch_size = 1000 tst_batch_size = 1000 trainloader = torch.utils.data.DataLoader(trainset, batch_size=trn_batch_size, shuffle=False, pin_memory=True) valloader = torch.utils.data.DataLoader(valset, batch_size=val_batch_size, shuffle=False, sampler=SubsetRandomSampler(validset.indices), pin_memory=True) testloader = torch.utils.data.DataLoader(testset, batch_size=tst_batch_size, shuffle=False, pin_memory=True) ``` #Budget for Data Subset Selection ``` bud = int(fraction * N) print("Budget, fraction and N:", bud, fraction, N) # Transfer all the data to GPU print_every = 3 ``` #Loading ResNet Model ``` model = ResNet18(num_cls) model = model.to(device) print(model) ``` #Initial Random Subset for Training ``` start_idxs = np.random.choice(N, size=bud, replace=False) ``` #Loss Type, Optimizer and Learning Rate Scheduler ``` criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9, weight_decay=5e-4) scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs) ``` #Last Layer GLISTER Strategy 
with Stcohastic Selection ``` setf_model = Strategy(trainloader, valloader, model, criterion, learning_rate, device, num_cls, False, 'Stochastic') idxs = start_idxs print("Starting Greedy Selection Strategy!") substrn_losses = np.zeros(num_epochs) fulltrn_losses = np.zeros(num_epochs) val_losses = np.zeros(num_epochs) timing = np.zeros(num_epochs) val_acc = np.zeros(num_epochs) tst_acc = np.zeros(num_epochs) full_trn_acc = np.zeros(num_epochs) subtrn_acc = np.zeros(num_epochs) subset_trnloader = torch.utils.data.DataLoader(trainset, batch_size=trn_batch_size, shuffle=False, sampler=SubsetRandomSampler(idxs), pin_memory=True) ``` #Training Loop ``` for i in tqdm.trange(num_epochs): subtrn_loss = 0 subtrn_correct = 0 subtrn_total = 0 start_time = time.time() if (((i+1) % select_every) == 0): cached_state_dict = copy.deepcopy(model.state_dict()) clone_dict = copy.deepcopy(model.state_dict()) print("selEpoch: %d, Starting Selection:" % i, str(datetime.datetime.now())) subset_start_time = time.time() subset_idxs, grads_idxs = setf_model.select(int(bud), clone_dict) subset_end_time = time.time() - subset_start_time print("Subset Selection Time is:" + str(subset_end_time)) idxs = subset_idxs print("selEpoch: %d, Selection Ended at:" % (i), str(datetime.datetime.now())) model.load_state_dict(cached_state_dict) subset_trnloader = torch.utils.data.DataLoader(trainset, batch_size=trn_batch_size, shuffle=False, sampler=SubsetRandomSampler(idxs), pin_memory=True) model.train() for batch_idx, (inputs, targets) in enumerate(subset_trnloader): inputs, targets = inputs.to(device), targets.to(device, non_blocking=True) # targets can have non_blocking=True. optimizer.zero_grad() outputs = model(inputs) loss = criterion(outputs, targets) subtrn_loss += loss.item() loss.backward() optimizer.step() _, predicted = outputs.max(1) subtrn_total += targets.size(0) subtrn_correct += predicted.eq(targets).sum().item() scheduler.step() timing[i] = time.time() - start_time #print("Epoch timing is: " + str(timing[i])) val_loss = 0 val_correct = 0 val_total = 0 tst_correct = 0 tst_total = 0 tst_loss = 0 full_trn_loss = 0 #subtrn_loss = 0 full_trn_correct = 0 full_trn_total = 0 model.eval() with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(valloader): #print(batch_idx) inputs, targets = inputs.to(device), targets.to(device, non_blocking=True) outputs = model(inputs) loss = criterion(outputs, targets) val_loss += loss.item() _, predicted = outputs.max(1) val_total += targets.size(0) val_correct += predicted.eq(targets).sum().item() for batch_idx, (inputs, targets) in enumerate(testloader): #print(batch_idx) inputs, targets = inputs.to(device), targets.to(device, non_blocking=True) outputs = model(inputs) loss = criterion(outputs, targets) tst_loss += loss.item() _, predicted = outputs.max(1) tst_total += targets.size(0) tst_correct += predicted.eq(targets).sum().item() for batch_idx, (inputs, targets) in enumerate(trainloader): inputs, targets = inputs.to(device), targets.to(device, non_blocking=True) outputs = model(inputs) loss = criterion(outputs, targets) full_trn_loss += loss.item() _, predicted = outputs.max(1) full_trn_total += targets.size(0) full_trn_correct += predicted.eq(targets).sum().item() val_acc[i] = val_correct/val_total tst_acc[i] = tst_correct/tst_total subtrn_acc[i] = subtrn_correct/subtrn_total full_trn_acc[i] = full_trn_correct/full_trn_total substrn_losses[i] = subtrn_loss fulltrn_losses[i] = full_trn_loss val_losses[i] = val_loss print('Epoch:', i + 1, 
'SubsetTrn,FullTrn,ValLoss,Time:', subtrn_loss, full_trn_loss, val_loss, timing[i]) ``` #Results Logging ``` print("SelectionRun---------------------------------") print("Final SubsetTrn and FullTrn Loss:", subtrn_loss, full_trn_loss) print("Validation Loss and Accuracy:", val_loss, val_acc[-1]) print("Test Data Loss and Accuracy:", tst_loss, tst_acc[-1]) print('-----------------------------------') print("GLISTER", file=logfile) print('---------------------------------------------------------------------', file=logfile) val = "Validation Accuracy," tst = "Test Accuracy," time_str = "Time," for i in range(num_epochs): time_str = time_str + "," + str(timing[i]) val = val + "," + str(val_acc[i]) tst = tst + "," + str(tst_acc[i]) print(timing, file=logfile) print(val, file=logfile) print(tst, file=logfile) ``` #Full Data Training ``` torch.manual_seed(42) np.random.seed(42) model = ResNet18(num_cls) model = model.to(device) idxs = start_idxs criterion = nn.CrossEntropyLoss() #optimizer = optim.SGD(model.parameters(), lr=learning_rate) optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9, weight_decay=5e-4) scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs) print("Starting Full Training Run!") substrn_losses = np.zeros(num_epochs) fulltrn_losses = np.zeros(num_epochs) val_losses = np.zeros(num_epochs) subset_trnloader = torch.utils.data.DataLoader(trainset, batch_size=trn_batch_size, shuffle=False, sampler=SubsetRandomSampler(idxs), pin_memory=True) timing = np.zeros(num_epochs) val_acc = np.zeros(num_epochs) tst_acc = np.zeros(num_epochs) full_trn_acc = np.zeros(num_epochs) subtrn_acc = np.zeros(num_epochs) ``` #Full Training Loop ``` for i in tqdm.trange(num_epochs): start_time = time.time() model.train() for batch_idx, (inputs, targets) in enumerate(trainloader): inputs, targets = inputs.to(device), targets.to(device, non_blocking=True) # Variables in Pytorch are differentiable. inputs, target = Variable(inputs), Variable(inputs) # This will zero out the gradients for this batch. 
optimizer.zero_grad() outputs = model(inputs) loss = criterion(outputs, targets) loss.backward() optimizer.step() scheduler.step() timing[i] = time.time() - start_time val_loss = 0 val_correct = 0 val_total = 0 tst_correct = 0 tst_total = 0 tst_loss = 0 full_trn_loss = 0 subtrn_loss = 0 full_trn_correct = 0 full_trn_total = 0 subtrn_correct = 0 subtrn_total = 0 model.eval() with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(valloader): # print(batch_idx) inputs, targets = inputs.to(device), targets.to(device, non_blocking=True) outputs = model(inputs) loss = criterion(outputs, targets) val_loss += loss.item() _, predicted = outputs.max(1) val_total += targets.size(0) val_correct += predicted.eq(targets).sum().item() for batch_idx, (inputs, targets) in enumerate(testloader): # print(batch_idx) inputs, targets = inputs.to(device), targets.to(device, non_blocking=True) outputs = model(inputs) loss = criterion(outputs, targets) tst_loss += loss.item() _, predicted = outputs.max(1) tst_total += targets.size(0) tst_correct += predicted.eq(targets).sum().item() for batch_idx, (inputs, targets) in enumerate(trainloader): inputs, targets = inputs.to(device), targets.to(device, non_blocking=True) outputs = model(inputs) loss = criterion(outputs, targets) full_trn_loss += loss.item() _, predicted = outputs.max(1) full_trn_total += targets.size(0) full_trn_correct += predicted.eq(targets).sum().item() for batch_idx, (inputs, targets) in enumerate(subset_trnloader): inputs, targets = inputs.to(device), targets.to(device, non_blocking=True) outputs = model(inputs) loss = criterion(outputs, targets) subtrn_loss += loss.item() _, predicted = outputs.max(1) subtrn_total += targets.size(0) subtrn_correct += predicted.eq(targets).sum().item() val_acc[i] = val_correct / val_total tst_acc[i] = tst_correct / tst_total subtrn_acc[i] = subtrn_correct / subtrn_total full_trn_acc[i] = full_trn_correct / full_trn_total substrn_losses[i] = subtrn_loss fulltrn_losses[i] = full_trn_loss val_losses[i] = val_loss print('Epoch:', i + 1, 'SubsetTrn,FullTrn,ValLoss,Time:', subtrn_loss, full_trn_loss, val_loss, timing[i]) ``` #Results and Timing Logging ``` print("SelectionRun---------------------------------") print("Final SubsetTrn and FullTrn Loss:", subtrn_loss, full_trn_loss) print("Validation Loss and Accuracy:", val_loss, val_acc[-1]) print("Test Data Loss and Accuracy:", tst_loss, tst_acc[-1]) print('-----------------------------------') print("Full Training", file=logfile) print('---------------------------------------------------------------------', file=logfile) val = "Validation Accuracy," tst = "Test Accuracy," time_str = "Time," for i in range(num_epochs): time_str = time_str + "," + str(timing[i]) val = val + "," + str(val_acc[i]) tst = tst + "," + str(tst_acc[i]) print(timing, file=logfile) print(val, file=logfile) print(tst, file=logfile) logfile.close() ```
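A possible follow-up cell (my addition, not part of the original notebook) is a quick visual comparison of the two runs. It assumes the per-epoch arrays from the GLISTER run were copied before the full-training run overwrote `val_acc`/`tst_acc`; the names `glister_val_acc` and `glister_tst_acc` are hypothetical copies made with e.g. `glister_val_acc = val_acc.copy()`.
```
# Hypothetical plotting cell: compare GLISTER-subset vs full-data accuracy.
# glister_val_acc / glister_tst_acc are assumed copies saved before the
# full-training run; val_acc / tst_acc now hold the full-training curves.
epochs = np.arange(1, num_epochs + 1)
plt.figure(figsize=(10, 4))
plt.plot(epochs, glister_val_acc, label='GLISTER subset - val accuracy')
plt.plot(epochs, glister_tst_acc, label='GLISTER subset - test accuracy')
plt.plot(epochs, val_acc, label='Full data - val accuracy')
plt.plot(epochs, tst_acc, label='Full data - test accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.grid(True)
plt.savefig(os.path.join(all_logs_dir, 'accuracy_comparison.png'))
plt.show()
```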
github_jupyter
``` import numpy as np import pandas as pd import matplotlib.pyplot as plt from matplotlib import style import matplotlib.ticker as ticker import seaborn as sns from sklearn.datasets import load_boston from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import accuracy_score from sklearn.metrics import confusion_matrix from sklearn.metrics import plot_confusion_matrix from sklearn.metrics import classification_report from sklearn.metrics import f1_score from sklearn.compose import ColumnTransformer from sklearn.preprocessing import OneHotEncoder from sklearn.model_selection import cross_val_score from sklearn.model_selection import train_test_split from sklearn.model_selection import RepeatedKFold from sklearn.model_selection import GridSearchCV from sklearn.model_selection import ParameterGrid from sklearn.inspection import permutation_importance import multiprocessing labels = pd.read_csv('../../csv/train_labels.csv') labels.head() values = pd.read_csv('../../csv/train_values.csv') values.T #Promedio de altura por piso values['height_percentage_per_floor_pre_eq'] = values['height_percentage']/values['count_floors_pre_eq'] values['volume_percentage'] = values['area_percentage'] * values['height_percentage'] #Algunos promedios por localizacion values['avg_age_for_geo_level_2_id'] = values.groupby('geo_level_2_id')['age'].transform('mean') values['avg_area_percentage_for_geo_level_2_id'] = values.groupby('geo_level_2_id')['area_percentage'].transform('mean') values['avg_height_percentage_for_geo_level_2_id'] = values.groupby('geo_level_2_id')['height_percentage'].transform('mean') values['avg_count_floors_for_geo_level_2_id'] = values.groupby('geo_level_2_id')['count_floors_pre_eq'].transform('mean') values['avg_age_for_geo_level_3_id'] = values.groupby('geo_level_3_id')['age'].transform('mean') values['avg_area_percentage_for_geo_level_3_id'] = values.groupby('geo_level_3_id')['area_percentage'].transform('mean') values['avg_height_percentage_for_geo_level_3_id'] = values.groupby('geo_level_3_id')['height_percentage'].transform('mean') values['avg_count_floors_for_geo_level_3_id'] = values.groupby('geo_level_3_id')['count_floors_pre_eq'].transform('mean') #Superestructuras superstructure_cols = [i for i in values.filter(regex='^has_superstructure*').columns] values["num_superstructures"] = values[superstructure_cols[0]] for c in superstructure_cols[1:]: values["num_superstructures"] += values[c] values['has_superstructure'] = values['num_superstructures'] != 0 #Familias por unidad de area y volumen y por piso values['family_area_relation'] = values['count_families'] / values['area_percentage'] values['family_volume_relation'] = values['count_families'] / values['volume_percentage'] values['family_floors_relation'] = values['count_families'] / values['count_floors_pre_eq'] #Relacion material(los mas importantes segun el modelo 5)-antiguedad values['20_yr_age_range'] = values['age'] // 20 * 20 values['20_yr_age_range'] = values['20_yr_age_range'].astype('str') values['superstructure'] = '' values['superstructure'] = np.where(values['has_superstructure_mud_mortar_stone'], values['superstructure'] + 'b', values['superstructure']) values['superstructure'] = np.where(values['has_superstructure_cement_mortar_brick'], values['superstructure'] + 'e', values['superstructure']) values['superstructure'] = np.where(values['has_superstructure_timber'], values['superstructure'] + 'f', values['superstructure']) values['age_range_superstructure'] = values['20_yr_age_range'] + 
values['superstructure'] del values['20_yr_age_range'] del values['superstructure'] values values.isnull().values.any() labels.isnull().values.any() values.dtypes values["building_id"].count() == values["building_id"].drop_duplicates().count() values.info() to_be_categorized = ["land_surface_condition", "foundation_type", "roof_type",\ "position", "ground_floor_type", "other_floor_type",\ "plan_configuration", "legal_ownership_status", "age_range_superstructure"] for row in to_be_categorized: values[row] = values[row].astype("category") values.info() datatypes = dict(values.dtypes) for row in values.columns: if datatypes[row] != "int64" and datatypes[row] != "int32" and \ datatypes[row] != "int16" and datatypes[row] != "int8": continue if values[row].nlargest(1).item() > 32767 and values[row].nlargest(1).item() < 2**31: values[row] = values[row].astype(np.int32) elif values[row].nlargest(1).item() > 127: values[row] = values[row].astype(np.int16) else: values[row] = values[row].astype(np.int8) values.info() labels.info() labels["building_id"] = labels["building_id"].astype(np.int32) labels["damage_grade"] = labels["damage_grade"].astype(np.int8) labels.info() ``` # Nuevo Modelo ``` important_values = values\ .merge(labels, on="building_id") important_values.drop(columns=["building_id"], inplace = True) important_values["geo_level_1_id"] = important_values["geo_level_1_id"].astype("category") important_values important_values.shape X_train, X_test, y_train, y_test = train_test_split(important_values.drop(columns = 'damage_grade'), important_values['damage_grade'], test_size = 0.2, random_state = 123) #OneHotEncoding def encode_and_bind(original_dataframe, feature_to_encode): dummies = pd.get_dummies(original_dataframe[[feature_to_encode]]) res = pd.concat([original_dataframe, dummies], axis=1) res = res.drop([feature_to_encode], axis=1) return(res) features_to_encode = ["geo_level_1_id", "land_surface_condition", "foundation_type", "roof_type",\ "position", "ground_floor_type", "other_floor_type",\ "plan_configuration", "legal_ownership_status", "age_range_superstructure"] for feature in features_to_encode: X_train = encode_and_bind(X_train, feature) X_test = encode_and_bind(X_test, feature) X_train X_train.shape # # Busco los mejores tres parametros indicados abajo. # n_estimators = [65, 100, 135] # max_features = [0.2, 0.5, 0.8] # max_depth = [None, 2, 5] # min_samples_split = [5, 15, 25] # # min_impurity_decrease = [0.0, 0.01, 0.025, 0.05, 0.1] # # min_samples_leaf # hyperF = {'n_estimators': n_estimators, # 'max_features': max_features, # 'max_depth': max_depth, # 'min_samples_split': min_samples_split # } # gridF = GridSearchCV(estimator = RandomForestClassifier(random_state = 123), # scoring = 'f1_micro', # param_grid = hyperF, # cv = 3, # verbose = 1, # n_jobs = -1) # bestF = gridF.fit(X_train, y_train) # res = pd.DataFrame(bestF.cv_results_) # res.loc[res['rank_test_score'] <= 10] # Utilizo los mejores parametros segun el GridSearch rf_model = RandomForestClassifier(n_estimators = 150, max_depth = None, max_features = 50, min_samples_split = 15, min_samples_leaf = 1, criterion = "gini", verbose=True) rf_model.fit(X_train, y_train) rf_model.score(X_train, y_train) # Calculo el F1 score para mi training set. 
y_preds = rf_model.predict(X_test) f1_score(y_test, y_preds, average='micro') test_values = pd.read_csv('../../csv/test_values.csv', index_col = "building_id") test_values test_values_subset = test_values test_values_subset["geo_level_1_id"] = test_values_subset["geo_level_1_id"].astype("category") test_values_subset #Promedio de altura por piso test_values_subset['height_percentage_per_floor_pre_eq'] = test_values_subset['height_percentage']/test_values_subset['count_floors_pre_eq'] test_values_subset['volume_percentage'] = test_values_subset['area_percentage'] * test_values_subset['height_percentage'] #Algunos promedios por localizacion test_values_subset['avg_age_for_geo_level_2_id'] = test_values_subset.groupby('geo_level_2_id')['age'].transform('mean') test_values_subset['avg_area_percentage_for_geo_level_2_id'] = test_values_subset.groupby('geo_level_2_id')['area_percentage'].transform('mean') test_values_subset['avg_height_percentage_for_geo_level_2_id'] = test_values_subset.groupby('geo_level_2_id')['height_percentage'].transform('mean') test_values_subset['avg_count_floors_for_geo_level_2_id'] = test_values_subset.groupby('geo_level_2_id')['count_floors_pre_eq'].transform('mean') test_values_subset['avg_age_for_geo_level_3_id'] = test_values_subset.groupby('geo_level_3_id')['age'].transform('mean') test_values_subset['avg_area_percentage_for_geo_level_3_id'] = test_values_subset.groupby('geo_level_3_id')['area_percentage'].transform('mean') test_values_subset['avg_height_percentage_for_geo_level_3_id'] = test_values_subset.groupby('geo_level_3_id')['height_percentage'].transform('mean') test_values_subset['avg_count_floors_for_geo_level_3_id'] = test_values_subset.groupby('geo_level_3_id')['count_floors_pre_eq'].transform('mean') #Superestructuras superstructure_cols = [i for i in test_values_subset.filter(regex='^has_superstructure*').columns] test_values_subset["num_superstructures"] = test_values_subset[superstructure_cols[0]] for c in superstructure_cols[1:]: test_values_subset["num_superstructures"] += test_values_subset[c] test_values_subset['has_superstructure'] = test_values_subset['num_superstructures'] != 0 #Familias por unidad de area y volumen y por piso test_values_subset['family_area_relation'] = test_values_subset['count_families'] / test_values_subset['area_percentage'] test_values_subset['family_volume_relation'] = test_values_subset['count_families'] / test_values_subset['volume_percentage'] test_values_subset['family_floors_relation'] = test_values_subset['count_families'] / test_values_subset['count_floors_pre_eq'] #Relacion material(los mas importantes segun el modelo 5)-antiguedad test_values_subset['20_yr_age_range'] = test_values_subset['age'] // 20 * 20 test_values_subset['20_yr_age_range'] = test_values_subset['20_yr_age_range'].astype('str') test_values_subset['superstructure'] = '' test_values_subset['superstructure'] = np.where(test_values_subset['has_superstructure_mud_mortar_stone'], test_values_subset['superstructure'] + 'b', test_values_subset['superstructure']) test_values_subset['superstructure'] = np.where(test_values_subset['has_superstructure_cement_mortar_brick'], test_values_subset['superstructure'] + 'e', test_values_subset['superstructure']) test_values_subset['superstructure'] = np.where(test_values_subset['has_superstructure_timber'], test_values_subset['superstructure'] + 'f', test_values_subset['superstructure']) test_values_subset['age_range_superstructure'] = test_values_subset['20_yr_age_range'] + 
test_values_subset['superstructure'] del test_values_subset['20_yr_age_range'] del test_values_subset['superstructure'] test_values_subset def encode_and_bind(original_dataframe, feature_to_encode): dummies = pd.get_dummies(original_dataframe[[feature_to_encode]]) res = pd.concat([original_dataframe, dummies], axis=1) res = res.drop([feature_to_encode], axis=1) return(res) features_to_encode = ["geo_level_1_id", "land_surface_condition", "foundation_type", "roof_type",\ "position", "ground_floor_type", "other_floor_type",\ "plan_configuration", "legal_ownership_status", "age_range_superstructure"] for feature in features_to_encode: test_values_subset = encode_and_bind(test_values_subset, feature) test_values_subset features_in_model_not_in_tests =\ list(filter(lambda col: col not in test_values_subset.columns.to_list(), X_train.columns.to_list())) for f in features_in_model_not_in_tests: test_values_subset[f] = 0 test_values_subset.drop(columns = list(filter(lambda col: col not in X_train.columns.to_list() , test_values_subset.columns.to_list())), inplace = True) test_values_subset.shape # Genero las predicciones para los test. preds = rf_model.predict(test_values_subset) submission_format = pd.read_csv('../../csv/submission_format.csv', index_col = "building_id") my_submission = pd.DataFrame(data=preds, columns=submission_format.columns, index=submission_format.index) my_submission.head() my_submission.to_csv('../../csv/predictions/jf/8/jf-model-8-submission.csv') !head ../../csv/predictions/jf/8/jf-model-8-submission.csv ```
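As an optional sanity check (my addition, not part of the original notebook), the fitted `rf_model` can be inspected to see which of the engineered features actually carry weight, using the impurity-based importances and the encoded `X_train` columns from above:
```
# Optional inspection cell: top 20 features by impurity-based importance.
import pandas as pd
import matplotlib.pyplot as plt

importances = pd.Series(rf_model.feature_importances_, index=X_train.columns)
# Horizontal bar plot, largest importance at the top
importances.nlargest(20).sort_values().plot(kind='barh', figsize=(8, 6))
plt.title('Top 20 feature importances (rf_model)')
plt.tight_layout()
plt.show()
```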
github_jupyter
# Binary classification from 2 features using K Nearest Neighbors (KNN) Classification using "raw" python or libraries. The binary classification is on a single boundary defined by a continuous function and added white noise ``` import numpy as np from numpy import random import matplotlib.pyplot as plt import matplotlib.colors as pltcolors from sklearn import metrics from sklearn.neighbors import KNeighborsClassifier as SkKNeighborsClassifier import pandas as pd import seaborn as sns ``` ## Model Quadratic function as boundary between positive and negative values Adding some unknown as a Gaussian noise The values of X are uniformly distributed and independent ``` # Two features, Gaussian noise def generateBatch(N): # xMin = 0 xMax = 1 b = 0.1 std = 0.1 # x = random.uniform(xMin, xMax, (N, 2)) # 4th degree relation to shape the boundary boundary = 2*(x[:,0]**4 + (x[:,0]-0.3)**3 + b) # Adding some gaussian noise labels = boundary + random.normal(0, std, N) > x[:,1] return (x, labels) ``` ### Training data ``` N = 2000 # x has 1 dim in R, label has 1 dim in B xTrain, labelTrain = generateBatch(N) colors = ['blue','red'] fig = plt.figure(figsize=(15,4)) plt.subplot(1,3,1) plt.scatter(xTrain[:,0], xTrain[:,1], c=labelTrain, cmap=pltcolors.ListedColormap(colors), marker=',', alpha=0.1) plt.xlabel('x0') plt.ylabel('x1') plt.title('Generated train data') plt.grid() cb = plt.colorbar() loc = np.arange(0,1,1/float(len(colors))) cb.set_ticks(loc) cb.set_ticklabels([0,1]) plt.subplot(1,3,2) plt.scatter(xTrain[:,0], labelTrain, marker=',', alpha=0.01) plt.xlabel('x0') plt.ylabel('label') plt.grid() plt.subplot(1,3,3) plt.scatter(xTrain[:,1], labelTrain, marker=',', alpha=0.01) plt.xlabel('x1') plt.ylabel('label') plt.grid() count, bins, ignored = plt.hist(labelTrain*1.0, 10, density=True, alpha=0.5) p = np.mean(labelTrain) print('Bernouilli parameter of the distribution:', p) ``` ### Test data for verification of the model ``` xTest, labelTest = generateBatch(N) testColors = ['navy', 'orangered'] ``` # Helpers ``` def plotHeatMap(X, classes, title=None, fmt='.2g', ax=None, xlabel=None, ylabel=None): """ Fix heatmap plot from Seaborn with pyplot 3.1.0, 3.1.1 https://stackoverflow.com/questions/56942670/matplotlib-seaborn-first-and-last-row-cut-in-half-of-heatmap-plot """ ax = sns.heatmap(X, xticklabels=classes, yticklabels=classes, annot=True, fmt=fmt, cmap=plt.cm.Blues, ax=ax) #notation: "annot" not "annote" bottom, top = ax.get_ylim() ax.set_ylim(bottom + 0.5, top - 0.5) if title: ax.set_title(title) if xlabel: ax.set_xlabel(xlabel) if ylabel: ax.set_ylabel(ylabel) def plotConfusionMatrix(yTrue, yEst, classes, title=None, fmt='.2g', ax=None): plotHeatMap(metrics.confusion_matrix(yTrue, yEst), classes, title, fmt, ax, \ xlabel='Estimations', ylabel='True values'); ``` # K Nearest Neighbors (KNN) References: - https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm - https://machinelearningmastery.com/tutorial-to-implement-k-nearest-neighbors-in-python-from-scratch/ ## Homemade Using a simple algorithm. 
Unweighted : each of the K neighbors has the same weight ``` # Select a K k = 10 # Create a Panda dataframe in order to link x and y df = pd.DataFrame(np.concatenate((xTrain, labelTrain.reshape(-1,1)), axis=1), columns = ('x0', 'x1', 'label')) # Insert columns to compute the difference of current test to the train and the L2 df.insert(df.shape[1], 'diff0', 0) df.insert(df.shape[1], 'diff1', 0) df.insert(df.shape[1], 'L2', 0) # threshold = k / 2 labelEst0 = np.zeros(xTest.shape[0]) for i, x in enumerate(xTest): # Compute distance and norm to each training sample df['diff0'] = df['x0'] - x[0] df['diff1'] = df['x1'] - x[1] df['L2'] = df['diff0']**2 + df['diff1']**2 # Get the K lowest kSmallest = df.nsmallest(k, 'L2') # Finalize prediction based on the mean labelEst0[i] = np.sum(kSmallest['label']) > threshold ``` ### Performance of homemade model ``` plt.figure(figsize=(12,4)) plt.subplot(1,3,1) plt.scatter(xTest[:,0], xTest[:,1], c=labelEst0, cmap=pltcolors.ListedColormap(testColors), marker='x', alpha=0.2); plt.xlabel('x0') plt.ylabel('x1') plt.grid() plt.title('Estimated') cb = plt.colorbar() loc = np.arange(0,1,1./len(testColors)) cb.set_ticks(loc) cb.set_ticklabels([0,1]); plt.subplot(1,3,2) plt.hist(labelEst0, 10, density=True, alpha=0.5) plt.title('Bernouilli parameter =' + str(np.mean(labelEst0))) plt.subplot(1,3,3) plt.scatter(xTest[:,0], xTest[:,1], c=labelTest, cmap=pltcolors.ListedColormap(colors), marker='x', alpha=0.1); plt.xlabel('x0') plt.ylabel('x1') plt.grid() plt.title('Generator') cb = plt.colorbar() loc = np.arange(0,1,1./len(colors)) cb.set_ticks(loc) cb.set_ticklabels([0,1]); accuracy0 = np.sum(labelTest == labelEst0)/N print('Accuracy =', accuracy0) ``` ### Precision $p(y = 1 \mid \hat{y} = 1)$ ``` print('Precision =', np.sum(labelTest[labelEst0 == 1])/np.sum(labelEst0)) ``` ### Recall $p(\hat{y} = 1 \mid y = 1)$ ``` print('Recall =', np.sum(labelTest[labelEst0 == 1])/np.sum(labelTest)) ``` ### Confusion matrix ``` plotConfusionMatrix(labelTest, labelEst0, np.array(['Blue', 'Red'])); print(metrics.classification_report(labelTest, labelEst0)) ``` This non-parametric model has a the best performance of all models used so far, including the neural network with two layers. The large drawback is the amount of computation for each sample to predict. This method is hardly usable for sample sizes over 10k. # Using SciKit Learn References: - SciKit documentation - https://stackabuse.com/k-nearest-neighbors-algorithm-in-python-and-scikit-learn/ ``` model1 = SkKNeighborsClassifier(n_neighbors=k) model1.fit(xTrain, labelTrain) labelEst1 = model1.predict(xTest) print('Accuracy =', model1.score(xTest, labelTest)) plt.hist(labelEst1*1.0, 10, density=True, alpha=0.5) plt.title('Bernouilli parameter =' + str(np.mean(labelEst1))); ``` ### Confusion matrix (plot) ``` plotConfusionMatrix(labelTest, labelEst1, np.array(['Blue', 'Red'])); ``` ### Classification report ``` print(metrics.classification_report(labelTest, labelEst1)) ``` ### ROC curve ``` logit_roc_auc = metrics.roc_auc_score(labelTest, labelEst1) fpr, tpr, thresholds = metrics.roc_curve(labelTest, model1.predict_proba(xTest)[:,1]) plt.plot(fpr, tpr, label='KNN classification (area = %0.2f)' % logit_roc_auc) plt.plot([0, 1], [0, 1],'r--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating characteristic') plt.legend(loc="lower right"); ``` # Where to go from here ? 
- Other linear implementations and simple neural nets using "raw" Python or SciKit Learn ([HTML](ClassificationContinuous2Features.html) / [Jupyter](ClassificationContinuous2Features.ipynb)), using TensorFlow ([HTML](ClassificationContinuous2Features-TensorFlow.html) / [Jupyter](ClassificationContinuous2Features-TensorFlow.ipynb)), or using Keras ([HTML](ClassificationContinuous2Features-Keras.html) / [Jupyter](ClassificationContinuous2Features-Keras.ipynb)) - Non-linear problem solving with Support Vector Machines (SVM) ([HTML](ClassificationSVM.html) / [Jupyter](ClassificationSVM.ipynb)) - More complex multi-class models on the Czech and Norwegian flags using Keras ([HTML](ClassificationMulti2Features-Keras.html) / [Jupyter](ClassificationMulti2Features-Keras.ipynb)), showing one of the main motivations for neural networks. - Compare with the two-feature linear regression using simple algorithms ([HTML](../linear/LinearRegressionBivariate.html) / [Jupyter](LinearRegressionBivariate.ipynb)), or using Keras ([HTML](LinearRegressionBivariate-Keras.html) / [Jupyter](LinearRegressionUnivariate-Keras.ipynb))
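One note on the homemade KNN above: the per-sample pandas loop is what makes it slow for large samples. A minimal vectorized sketch (my own addition, using only NumPy and the `xTrain`, `labelTrain`, `xTest`, `k` objects defined earlier) would compute all pairwise squared distances at once:
```
# Hypothetical vectorized variant of the homemade unweighted KNN above.
import numpy as np

def knn_predict(x_train, label_train, x_test, k=10):
    # (n_test, n_train) matrix of squared L2 distances via broadcasting
    d2 = ((x_test[:, None, :] - x_train[None, :, :]) ** 2).sum(axis=2)
    # indices of the k nearest training samples for each test sample
    nearest = np.argpartition(d2, k, axis=1)[:, :k]
    # unweighted majority vote with the same threshold rule (sum > k/2)
    return label_train[nearest].sum(axis=1) > (k / 2)

# labelEstVec = knn_predict(xTrain, labelTrain, xTest, k)
```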
github_jupyter
``` %matplotlib inline ``` ===================================================================== Compute Phase Slope Index (PSI) in source space for a visual stimulus ===================================================================== This example demonstrates how the Phase Slope Index (PSI) [1]_ can be computed in source space based on single trial dSPM source estimates. In addition, the example shows advanced usage of the connectivity estimation routines by first extracting a label time course for each epoch and then combining the label time course with the single trial source estimates to compute the connectivity. The result clearly shows how the activity in the visual label precedes more widespread activity (a postivive PSI means the label time course is leading). References ---------- .. [1] Nolte et al. "Robustly Estimating the Flow Direction of Information in Complex Physical Systems", Physical Review Letters, vol. 100, no. 23, pp. 1-4, Jun. 2008. ``` # Author: Martin Luessi <[email protected]> # # License: BSD (3-clause) import numpy as np import mne from mne.datasets import sample from mne.minimum_norm import read_inverse_operator, apply_inverse_epochs from mne.connectivity import seed_target_indices, phase_slope_index print(__doc__) data_path = sample.data_path() subjects_dir = data_path + '/subjects' fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif' fname_raw = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif' fname_event = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif' fname_label = data_path + '/MEG/sample/labels/Vis-lh.label' event_id, tmin, tmax = 4, -0.2, 0.3 method = "dSPM" # use dSPM method (could also be MNE or sLORETA) # Load data inverse_operator = read_inverse_operator(fname_inv) raw = mne.io.read_raw_fif(fname_raw) events = mne.read_events(fname_event) # pick MEG channels picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True, exclude='bads') # Read epochs epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks, baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13, eog=150e-6)) # Compute inverse solution and for each epoch. Note that since we are passing # the output to both extract_label_time_course and the phase_slope_index # functions, we have to use "return_generator=False", since it is only possible # to iterate over generators once. snr = 1.0 # use lower SNR for single epochs lambda2 = 1.0 / snr ** 2 stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method, pick_ori="normal", return_generator=True) # Now, we generate seed time series by averaging the activity in the left # visual corex label = mne.read_label(fname_label) src = inverse_operator['src'] # the source space used seed_ts = mne.extract_label_time_course(stcs, label, src, mode='mean_flip', verbose='error') # Combine the seed time course with the source estimates. There will be a total # of 7500 signals: # index 0: time course extracted from label # index 1..7499: dSPM source space time courses stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method, pick_ori="normal", return_generator=True) comb_ts = list(zip(seed_ts, stcs)) # Construct indices to estimate connectivity between the label time course # and all source space time courses vertices = [src[i]['vertno'] for i in range(2)] n_signals_tot = 1 + len(vertices[0]) + len(vertices[1]) indices = seed_target_indices([0], np.arange(1, n_signals_tot)) # Compute the PSI in the frequency range 8Hz..30Hz. 
We exclude the baseline # period from the connectivity estimation fmin = 8. fmax = 30. tmin_con = 0. sfreq = raw.info['sfreq'] # the sampling frequency psi, freqs, times, n_epochs, _ = phase_slope_index( comb_ts, mode='multitaper', indices=indices, sfreq=sfreq, fmin=fmin, fmax=fmax, tmin=tmin_con) # Generate a SourceEstimate with the PSI. This is simple since we used a single # seed (inspect the indices variable to see how the PSI scores are arranged in # the output) psi_stc = mne.SourceEstimate(psi, vertices=vertices, tmin=0, tstep=1, subject='sample') # Now we can visualize the PSI using the plot method. We use a custom colormap # to show signed values v_max = np.max(np.abs(psi)) brain = psi_stc.plot(surface='inflated', hemi='lh', time_label='Phase Slope Index (PSI)', subjects_dir=subjects_dir, clim=dict(kind='percent', pos_lims=(95, 97.5, 100))) brain.show_view('medial') brain.add_label(fname_label, color='green', alpha=0.7) ```
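As a quick check of the sign convention mentioned above (a positive PSI means the seed time course is leading), one could run the same `phase_slope_index` call on a toy pair of signals with a known lag. This sketch is my own addition, not part of the original example, and reuses only functions already imported above:
```
# Toy sanity check: signal 0 is a copy of signal 1 shifted to lead by `lag`
# samples, so the PSI from seed 0 to target 1 is expected to be positive.
import numpy as np
from mne.connectivity import seed_target_indices, phase_slope_index

rng = np.random.RandomState(42)
sfreq, n_epochs, n_times, lag = 250., 30, 500, 5
base = rng.randn(n_epochs, n_times + lag)
data = np.stack([base[:, lag:],    # leading signal (seed, index 0)
                 base[:, :-lag]],  # delayed copy (target, index 1)
                axis=1)            # shape (n_epochs, n_signals, n_times)
toy_indices = seed_target_indices([0], [1])
psi_toy, freqs_toy, _, _, _ = phase_slope_index(
    data, mode='multitaper', indices=toy_indices, sfreq=sfreq,
    fmin=8., fmax=30.)
print('Toy PSI (seed -> target):', psi_toy.ravel())  # should come out > 0
```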
github_jupyter
# 📝 Exercise M7.03 As with the classification metrics exercise, we will evaluate the regression metrics within a cross-validation framework to get familiar with the syntax. We will use the Ames house prices dataset. ``` import pandas as pd import numpy as np ames_housing = pd.read_csv("../datasets/house_prices.csv") data = ames_housing.drop(columns="SalePrice") target = ames_housing["SalePrice"] data = data.select_dtypes(np.number) target /= 1000 ``` <div class="admonition note alert alert-info"> <p class="first admonition-title" style="font-weight: bold;">Note</p> <p class="last">If you want a deeper overview regarding this dataset, you can refer to the Appendix - Datasets description section at the end of this MOOC.</p> </div> The first step will be to create a linear regression model. ``` # Write your code here. from sklearn.linear_model import LinearRegression linreg = LinearRegression() ``` Then, use the `cross_val_score` to estimate the generalization performance of the model. Use a `KFold` cross-validation with 10 folds. Make the use of the $R^2$ score explicit by assigning the parameter `scoring` (even though it is the default score). ``` from sklearn.model_selection import cross_val_score scores = cross_val_score(linreg, data, target, cv=10, scoring='r2') print(f"R2 score: {scores.mean():.3f} +/- {scores.std():.3f}") # Write your code here. from sklearn.model_selection import cross_validate result_linreg_r2 = cross_validate(linreg, data, target, cv=10, scoring="r2") result_reg_r2_df = pd.DataFrame(result_linreg_r2) result_reg_r2_df print(f"R2 result for linreg: {result_reg_r2_df['test_score'].mean():.3f} +/- {result_reg_r2_df['test_score'].std():.3f}") ``` Then, instead of using the $R^2$ score, use the mean absolute error. You need to refer to the documentation for the `scoring` parameter. ``` # Write your code here. result_linreg_mae = cross_validate(linreg, data, target, cv=10, scoring="neg_mean_absolute_error") result_reg_mae_df = pd.DataFrame(result_linreg_mae) result_reg_mae_df scores = cross_val_score(linreg, data, target, cv=10, scoring='neg_mean_absolute_error') scores = -scores print(f"Mean Absolute Error: {scores.mean():.3f} +/- {scores.std():.3f}") print(f"Mean Absolute Error result for linreg: {-result_reg_mae_df['test_score'].mean():.3f} +/- {-result_reg_mae_df['test_score'].std():.3f}") ``` Finally, use the `cross_validate` function and compute multiple scores/errors at once by passing a list of scorers to the `scoring` parameter. You can compute the $R^2$ score and the mean absolute error for instance. ``` # Write your code here. scoring = ["r2", "neg_mean_absolute_error"] result_linreg_duo = cross_validate(linreg, data, target, cv=10, scoring=scoring) scores = {"R2": result_linreg_duo["test_r2"], "MAE": -result_linreg_duo["test_neg_mean_absolute_error"]} scores_df = pd.DataFrame(scores) scores_df result_linreg_duo ```
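As an optional last step (not part of the exercise statement), the dictionary returned by `cross_validate` can be condensed into a small summary table, flipping the sign of the negated error:
```
# Optional summary: mean +/- std of both metrics across the 10 folds.
summary = pd.DataFrame({
    "R2": result_linreg_duo["test_r2"],
    "MAE": -result_linreg_duo["test_neg_mean_absolute_error"],
}).agg(["mean", "std"])
summary
```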
github_jupyter
# Flights data preparation ``` from pyspark.sql import SQLContext from pyspark.sql import DataFrame from pyspark.sql import Row from pyspark.sql.types import * import pandas as pd import StringIO import matplotlib.pyplot as plt hc = sc._jsc.hadoopConfiguration() hc.set("hive.execution.engine", "mr") ``` ## Function to parse CSV ``` import csv def parseCsv(csvStr): f = StringIO.StringIO(csvStr) reader = csv.reader(f, delimiter=',') row = reader.next() return row scsv = '"02Q","Titan Airways"' row = parseCsv(scsv) print row[0] print row[1] working_storage = 'WORKING_STORAGE' output_directory = 'jupyter/py2' protocol_name = 'PROTOCOL_NAME://' ``` ## Parse and convert Carrier data to parquet ``` carriersHeader = 'Code,Description' carriersText = sc.textFile(protocol_name + working_storage + "/jupyter_dataset/carriers.csv").filter(lambda x: x != carriersHeader) carriers = carriersText.map(lambda s: parseCsv(s)) \ .map(lambda s: Row(code=s[0], description=s[1])).cache().toDF() carriers.write.mode("overwrite").parquet(protocol_name + working_storage + "/" + output_directory + "/carriers") sqlContext.registerDataFrameAsTable(carriers, "carriers") carriers.limit(20).toPandas() ``` ## Parse and convert to parquet Airport data ``` airportsHeader= '"iata","airport","city","state","country","lat","long"' airports = sc.textFile(protocol_name + working_storage + "/jupyter_dataset/airports.csv") \ .filter(lambda x: x != airportsHeader) \ .map(lambda s: parseCsv(s)) \ .map(lambda p: Row(iata=p[0], \ airport=p[1], \ city=p[2], \ state=p[3], \ country=p[4], \ lat=float(p[5]), \ longt=float(p[6])) \ ).cache().toDF() airports.write.mode("overwrite").parquet(protocol_name + working_storage + "/" + output_directory + "/airports") sqlContext.registerDataFrameAsTable(airports, "airports") airports.limit(20).toPandas() ``` ## Parse and convert Flights data to parquet ``` flightsHeader = 'Year,Month,DayofMonth,DayOfWeek,DepTime,CRSDepTime,ArrTime,CRSArrTime,UniqueCarrier,FlightNum,TailNum,ActualElapsedTime,CRSElapsedTime,AirTime,ArrDelay,DepDelay,Origin,Dest,Distance,TaxiIn,TaxiOut,Cancelled,CancellationCode,Diverted,CarrierDelay,WeatherDelay,NASDelay,SecurityDelay,LateAircraftDelay' flights = sc.textFile(protocol_name + working_storage + "/jupyter_dataset/2008.csv.bz2") \ .filter(lambda x: x!= flightsHeader) \ .map(lambda s: parseCsv(s)) \ .map(lambda p: Row(Year=int(p[0]), \ Month=int(p[1]), \ DayofMonth=int(p[2]), \ DayOfWeek=int(p[3]), \ DepTime=p[4], \ CRSDepTime=p[5], \ ArrTime=p[6], \ CRSArrTime=p[7], \ UniqueCarrier=p[8], \ FlightNum=p[9], \ TailNum=p[10], \ ActualElapsedTime=p[11], \ CRSElapsedTime=p[12], \ AirTime=p[13], \ ArrDelay=int(p[14].replace("NA", "0")), \ DepDelay=int(p[15].replace("NA", "0")), \ Origin=p[16], \ Dest=p[17], \ Distance=long(p[18]), \ TaxiIn=p[19], \ TaxiOut=p[20], \ Cancelled=p[21], \ CancellationCode=p[22], \ Diverted=p[23], \ CarrierDelay=int(p[24].replace("NA", "0")), \ CarrierDelayStr=p[24], \ WeatherDelay=int(p[25].replace("NA", "0")), \ WeatherDelayStr=p[25], \ NASDelay=int(p[26].replace("NA", "0")), \ SecurityDelay=int(p[27].replace("NA", "0")), \ LateAircraftDelay=int(p[28].replace("NA", "0")))) \ .toDF() flights.write.mode("ignore").parquet(protocol_name + working_storage + "/" + output_directory + "/flights") sqlContext.registerDataFrameAsTable(flights, "flights") flights.limit(10).toPandas()[["ArrDelay","CarrierDelay","CarrierDelayStr","WeatherDelay","WeatherDelayStr","Distance"]] ```
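With the three tables registered above, a natural follow-up (my addition, not part of the original notebook, assuming the same `sqlContext` is still available) is a Spark SQL query joining flights with carriers, for example the average arrival delay per carrier:
```
# Hypothetical follow-up query: average arrival delay per carrier, joining the
# registered "flights" and "carriers" tables.
avg_delay = sqlContext.sql("""
    SELECT c.description AS carrier,
           AVG(f.ArrDelay) AS avg_arr_delay,
           COUNT(*)        AS num_flights
    FROM flights f
    JOIN carriers c ON f.UniqueCarrier = c.code
    GROUP BY c.description
    ORDER BY avg_arr_delay DESC
""")
avg_delay.limit(10).toPandas()
```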
github_jupyter
<a href="https://colab.research.google.com/github/harnalashok/hadoop/blob/main/hadoop_spark_install_on_Colab.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` # Last amended: 30th March, 2021 # Myfolder: github/hadoop # Objective: # i) Install hadoop on colab # (current version is 3.2.2) # ii) Experiments with hadoop # iii) Install spark on colab # iv) Access hadoop file from spark # v) Install koalas on colab # # # Java 8 install: https://stackoverflow.com/a/58191107 # Hadoop install: https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-common/SingleCluster.html # Spark install: https://stackoverflow.com/a/64183749 # https://www.analyticsvidhya.com/blog/2020/11/a-must-read-guide-on-how-to-work-with-pyspark-on-google-colab-for-data-scientists/ ``` ## Install hadoop If it takes too long, it means, it is awaiting input from you regarding overwriting ssh keys ### Define functions No downloads. Just function definitions ``` # 1.0 How to set environment variable import os import time ``` #### ssh_install() ``` # 2.0 Function to install ssh client and sshd (Server) def ssh_install(): print("\n--1. Download and install ssh server----\n") ! sudo apt-get remove openssh-client openssh-server ! sudo apt install openssh-client openssh-server print("\n--2. Restart ssh server----\n") ! service ssh restart ``` #### Java install ``` # 3.0 Function to download and install java 8 def install_java(): ! rm -rf /usr/java print("\n--Download and install Java 8----\n") !apt-get install -y openjdk-8-jdk-headless -qq > /dev/null # install openjdk os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64" # set environment variable !update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java !update-alternatives --set javac /usr/lib/jvm/java-8-openjdk-amd64/bin/javac !mkdir -p /usr/java ! ln -s "/usr/lib/jvm/java-8-openjdk-amd64" "/usr/java" ! mv "/usr/java/java-8-openjdk-amd64" "/usr/java/latest" !java -version #check java version !javac -version ``` #### hadoop install ``` # 4.0 Function to download and install hadoop def hadoop_install(): print("\n--5. Download hadoop tar.gz----\n") ! wget -c https://mirrors.estointernet.in/apache/hadoop/common/hadoop-3.2.2/hadoop-3.2.2.tar.gz print("\n--6. Transfer downloaded content and unzip tar.gz----\n") ! mv /content/hadoop* /opt/ ! tar -xzf /opt/hadoop-3.2.2.tar.gz --directory /opt/ print("\n--7. Create hadoop folder----\n") ! rm -r /app/hadoop/tmp ! mkdir -p /app/hadoop/tmp print("\n--8. Check folder for files----\n") ! ls -la /opt ``` #### hadoop config ``` # 5.0 Function for setting hadoop configuration def hadoop_config(): print("\n--Begin Configuring hadoop---\n") print("\n=============================\n") print("\n--9. core-site.xml----\n") ! cat /opt/hadoop-3.2.2/etc/hadoop/core-site.xml print("\n--10. Amend core-site.xml----\n") ! echo '<?xml version="1.0" encoding="UTF-8"?>' > /opt/hadoop-3.2.2/etc/hadoop/core-site.xml ! echo '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>' >> /opt/hadoop-3.2.2/etc/hadoop/core-site.xml ! echo ' <configuration>' >> /opt/hadoop-3.2.2/etc/hadoop/core-site.xml ! echo ' <property>' >> /opt/hadoop-3.2.2/etc/hadoop/core-site.xml ! echo ' <name>fs.defaultFS</name>' >> /opt/hadoop-3.2.2/etc/hadoop/core-site.xml ! echo ' <value>hdfs://localhost:9000</value>' >> /opt/hadoop-3.2.2/etc/hadoop/core-site.xml ! echo ' </property>' >> /opt/hadoop-3.2.2/etc/hadoop/core-site.xml ! 
echo ' <property>' >> /opt/hadoop-3.2.2/etc/hadoop/core-site.xml ! echo ' <name>hadoop.tmp.dir</name>' >> /opt/hadoop-3.2.2/etc/hadoop/core-site.xml ! echo ' <value>/app/hadoop/tmp</value>' >> /opt/hadoop-3.2.2/etc/hadoop/core-site.xml ! echo ' <description>A base for other temporary directories.</description>' >> /opt/hadoop-3.2.2/etc/hadoop/core-site.xml ! echo ' </property>' >> /opt/hadoop-3.2.2/etc/hadoop/core-site.xml # Added following regarding safemode from here: # https://stackoverflow.com/a/33800253 ! echo ' <property>' >> /opt/hadoop-3.2.2/etc/hadoop/core-site.xml ! echo ' <name>dfs.safemode.threshold.pct</name>' >> /opt/hadoop-3.2.2/etc/hadoop/core-site.xml ! echo ' <value>0</value>' >> /opt/hadoop-3.2.2/etc/hadoop/core-site.xml ! echo ' </property>' >> /opt/hadoop-3.2.2/etc/hadoop/core-site.xml ! echo ' </configuration>' >> /opt/hadoop-3.2.2/etc/hadoop/core-site.xml print("\n--11. Amended core-site.xml----\n") ! cat /opt/hadoop-3.2.2/etc/hadoop/core-site.xml print("\n--12. yarn-site.xml----\n") !cat /opt/hadoop-3.2.2/etc/hadoop/yarn-site.xml !echo '<?xml version="1.0" encoding="UTF-8"?>' > /opt/hadoop-3.2.2/etc/hadoop/yarn-site.xml !echo '<configuration>' >> /opt/hadoop-3.2.2/etc/hadoop/yarn-site.xml !echo ' <property>' >> /opt/hadoop-3.2.2/etc/hadoop/yarn-site.xml !echo ' <name>yarn.nodemanager.aux-services</name>' >> /opt/hadoop-3.2.2/etc/hadoop/yarn-site.xml !echo ' <value>mapreduce_shuffle</value>' >> /opt/hadoop-3.2.2/etc/hadoop/yarn-site.xml !echo ' </property>' >> /opt/hadoop-3.2.2/etc/hadoop/yarn-site.xml !echo ' <property>' >> /opt/hadoop-3.2.2/etc/hadoop/yarn-site.xml !echo ' <name>yarn.nodemanager.vmem-check-enabled</name>' >> /opt/hadoop-3.2.2/etc/hadoop/yarn-site.xml !echo ' <value>false</value>' >> /opt/hadoop-3.2.2/etc/hadoop/yarn-site.xml !echo ' </property>' >> /opt/hadoop-3.2.2/etc/hadoop/yarn-site.xml !echo ' </configuration>' >> /opt/hadoop-3.2.2/etc/hadoop/yarn-site.xml print("\n--13. Amended yarn-site.xml----\n") !cat /opt/hadoop-3.2.2/etc/hadoop/yarn-site.xml print("\n--14. mapred-site.xml----\n") !cat /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml print("\n--15. 
Amend mapred-site.xml----\n") !echo '<?xml version="1.0"?>' > /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo '<configuration>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo ' <property>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo ' <name>mapreduce.framework.name</name>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo ' <value>yarn</value>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo ' </property>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo ' <property>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo ' <name>yarn.app.mapreduce.am.env</name>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo ' <value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo ' </property>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo ' <property>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo ' <name>mapreduce.map.env</name>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo ' <value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo ' </property>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo ' <property>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo ' <name>mapreduce.reduce.env</name>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo ' <value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo ' </property>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo '</configuration>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml print("\n--16, Amended mapred-site.xml----\n") !cat /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml print("\n---17. hdfs-site.xml----\n") !cat /opt/hadoop-3.2.2/etc/hadoop/hdfs-site.xml print("\n---18. Amend hdfs-site.xml----\n") !echo '<?xml version="1.0" encoding="UTF-8"?> ' > /opt/hadoop-3.2.2/etc/hadoop/hdfs-site.xml !echo '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>' >> /opt/hadoop-3.2.2/etc/hadoop/hdfs-site.xml !echo '<configuration>' >> /opt/hadoop-3.2.2/etc/hadoop/hdfs-site.xml !echo ' <property>' >> /opt/hadoop-3.2.2/etc/hadoop/hdfs-site.xml !echo ' <name>dfs.replication</name>' >> /opt/hadoop-3.2.2/etc/hadoop/hdfs-site.xml !echo ' <value>1</value>' >> /opt/hadoop-3.2.2/etc/hadoop/hdfs-site.xml !echo ' </property>' >> /opt/hadoop-3.2.2/etc/hadoop/hdfs-site.xml !echo ' <property>' >> /opt/hadoop-3.2.2/etc/hadoop/hdfs-site.xml !echo ' <name>dfs.block.size</name>' >> /opt/hadoop-3.2.2/etc/hadoop/hdfs-site.xml !echo ' <value>16777216</value>' >> /opt/hadoop-3.2.2/etc/hadoop/hdfs-site.xml !echo ' <description>Block size</description>' >> /opt/hadoop-3.2.2/etc/hadoop/hdfs-site.xml !echo ' </property>' >> /opt/hadoop-3.2.2/etc/hadoop/hdfs-site.xml !echo '</configuration>' >> /opt/hadoop-3.2.2/etc/hadoop/hdfs-site.xml print("\n---19. Amended hdfs-site.xml----\n") !cat /opt/hadoop-3.2.2/etc/hadoop/hdfs-site.xml print("\n---20. hadoop-env.sh----\n") # https://stackoverflow.com/a/53140448 !cat /opt/hadoop-3.2.2/etc/hadoop/hadoop-env.sh ! echo 'export JAVA_HOME="/usr/lib/jvm/java-8-openjdk-amd64"' >> /opt/hadoop-3.2.2/etc/hadoop/hadoop-env.sh ! echo 'export HDFS_NAMENODE_USER="root"' >> /opt/hadoop-3.2.2/etc/hadoop/hadoop-env.sh ! echo 'export HDFS_DATANODE_USER="root"' >> /opt/hadoop-3.2.2/etc/hadoop/hadoop-env.sh ! echo 'export HDFS_SECONDARYNAMENODE_USER="root"' >> /opt/hadoop-3.2.2/etc/hadoop/hadoop-env.sh ! 
echo 'export YARN_RESOURCEMANAGER_USER="root"' >> /opt/hadoop-3.2.2/etc/hadoop/hadoop-env.sh ! echo 'export YARN_NODEMANAGER_USER="root"' >> /opt/hadoop-3.2.2/etc/hadoop/hadoop-env.sh print("\n---21. Amended hadoop-env.sh----\n") !cat /opt/hadoop-3.2.2/etc/hadoop/hadoop-env.sh ``` #### ssh keys ``` # 6.0 Function tp setup ssh passphrase def set_keys(): print("\n---22. Generate SSH keys----\n") ! cd ~ ; pwd ! cd ~ ; ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa ! cd ~ ; cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys ! cd ~ ; chmod 0600 ~/.ssh/authorized_keys ``` #### Set environment ``` # 7.0 Function to set up environmental variables def set_env(): print("\n---23. Set Environment variables----\n") # 'export' command does not work in colab # https://stackoverflow.com/a/57240319 os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64" #set environment variable os.environ["JRE_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64/jre" os.environ["HADOOP_HOME"] = "/opt/hadoop-3.2.2" os.environ["HADOOP_CONF_DIR"] = "/opt/hadoop-3.2.2/etc/hadoop" os.environ["LD_LIBRARY_PATH"] += ":/opt/hadoop-3.2.2/lib/native" os.environ["PATH"] += ":/opt/hadoop-3.2.2/bin:/opt/hadoop-3.2.2/sbin" ``` #### Install all function ``` # 8.0 Function to call all functions def install_hadoop(): print("\n--Install java----\n") ssh_install() install_java() hadoop_install() hadoop_config() set_keys() set_env() ``` ### Begin install Start downloading, install and configure. Takes around 2 minutes ``` # 9.0 Start installation start = time.time() install_hadoop() end = time.time() print("\n---Time taken----\n") print((end- start)/60) ``` ### Format hadoop ``` # 10.0 Format hadoop print("\n---24. Format namenode----\n") !hdfs namenode -format ``` ## Start and test hadoop If namenode is in safemode, use the command: `!hdfs dfsadmin -safemode leave` #### Start hadoop If start fails with 'Connection refused', run `ssh_install()` once again ``` # 11.0 Start namenode # If this fails, run # ssh_install() below # and start hadoop again: print("\n---25. Start namenode----\n") ! start-dfs.sh #ssh_install() ``` #### Start yarn ``` # 11.1 Start yarn ! start-yarn.sh ``` If `start-dfs.sh` fails, issue the following three commands, one after another:<br> `! sudo apt-get remove openssh-client openssh-server`<br> `! sudo apt-get install openssh-client openssh-server`<br> `! service ssh restart`<br> And then try to start hadoop again, as: `start-dfs.sh` #### Test hadoop IF in safe mode, leave safe mode as:<br> `!hdfs dfsadmin -safemode leave` ``` # 11.1 print("\n---26. Make folders in hadoop----\n") ! hdfs dfs -mkdir /user ! hdfs dfs -mkdir /user/ashok # 11.2 Run hadoop commands ! hdfs dfs -ls / ! hdfs dfs -ls /user # 11.3 Stopping hadoop # Gives some errors # But hadoop stops #!stop-dfs.sh ``` Run the `ssh_install()` again if hadoop fails to start with `start-dfs.sh` and then try to start hadoop again. ## Install spark ### Define functions `findspark`: PySpark isn't on `sys.path` by default, but that doesn't mean it can't be used as a regular library. You can address this by either symlinking pyspark into your site-packages, or adding `pyspark` to `sys.path` at runtime. `findspark` does the latter. ``` # 1.0 Function to download and unzip spark def spark_koalas_install(): print("\n--1.1 Install findspark----\n") !pip install -q findspark print("\n--1.2 Install databricks Koalas----\n") !pip install koalas print("\n--1.3 Download Apache tar.gz----\n") ! 
wget -c https://mirrors.estointernet.in/apache/spark/spark-3.1.1/spark-3.1.1-bin-hadoop3.2.tgz print("\n--1.4 Transfer downloaded content and unzip tar.gz----\n") ! mv /content/spark* /opt/ ! tar -xzf /opt/spark-3.1.1-bin-hadoop3.2.tgz --directory /opt/ print("\n--1.5 Check folder for files----\n") ! ls -la /opt # 1.1 Function to set environment def set_spark_env(): print("\n---2. Set Environment variables----\n") os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64" os.environ["JRE_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64/jre" os.environ["SPARK_HOME"] = "/opt/spark-3.1.1-bin-hadoop3.2" os.environ["LD_LIBRARY_PATH"] += ":/opt/spark-3.1.1-bin-hadoop3.2/lib/native" os.environ["PATH"] += ":/opt/spark-3.1.1-bin-hadoop3.2/bin:/opt/spark-3.1.1-bin-hadoop3.2/sbin" print("\n---2.1. Check Environment variables----\n") # Check ! echo $PATH ! echo $LD_LIBRARY_PATH # 1.2 Function to configure spark def spark_conf(): print("\n---3. Configure spark to access hadoop----\n") !mv /opt/spark-3.1.1-bin-hadoop3.2/conf/spark-env.sh.template /opt/spark-3.1.1-bin-hadoop3.2/conf/spark-env.sh !echo "HADOOP_CONF_DIR=/opt/hadoop-3.2.2/etc/hadoop/" >> /opt/spark-3.1.1-bin-hadoop3.2/conf/spark-env.sh print("\n---3.1 Check ----\n") #!cat /opt/spark-3.1.1-bin-hadoop3.2/conf/spark-env.sh ``` ### Install spark ``` # 2.0 Call all the three functions def install_spark(): spark_koalas_install() set_spark_env() spark_conf() # 2.1 install_spark() ``` ## Test spark Hadoop should have been started Call some libraries ``` # 3.0 Just call some libraries to test import pandas as pd import numpy as np # 3.1 Get spark in sys.path import findspark findspark.init() # 3.2 Call other spark libraries # Just to test from pyspark.sql import SparkSession import databricks.koalas as ks from pyspark.ml.feature import VectorAssembler from pyspark.ml.regression import LinearRegression # 3.1 Build spark session spark = SparkSession. \ builder. \ master("local[*]"). 
\ getOrCreate() # 4.0 Pandas DataFrame pdf = pd.DataFrame({ 'x1': ['a','a','b','b', 'b', 'c', 'd','d'], 'x2': ['apple', 'orange', 'orange','orange', 'peach', 'peach','apple','orange'], 'x3': [1, 1, 2, 2, 2, 4, 1, 2], 'x4': [2.4, 2.5, 3.5, 1.4, 2.1,1.5, 3.0, 2.0], 'y1': [1, 0, 1, 0, 0, 1, 1, 0], 'y2': ['yes', 'no', 'no', 'yes', 'yes', 'yes', 'no', 'yes'] }) # 4.1 pdf # 4.2 Transform to Spark DataFrame df = spark.createDataFrame(pdf) df.show() # 4.3 Create a csv file # and tranfer it to hdfs !echo "a,b,c,d" > /content/airports.csv !echo "5,4,6,7" >> /content/airports.csv !echo "2,3,4,5" >> /content/airports.csv !echo "8,9,0,1" >> /content/airports.csv !echo "2,3,4,1" >> /content/airports.csv !echo "1,2,2,1" >> /content/airports.csv !echo "0,1,2,6" >> /content/airports.csv !echo "9,3,1,8" >> /content/airports.csv !ls -la /content # 4.4 !hdfs dfs -rm -f /user/ashok/airports.csv !hdfs dfs -put /content/airports.csv /user/ashok/ !hdfs dfs -ls /user/ashok # 5.0 Read file directly from hadoop airports_df = spark.read.csv( "/user/ashok/airports.csv", inferSchema = True, header = True ) # 5.1 Show file airports_df.show() ``` ## Test Koalas Hadoop should have been started Create a koalas dataframe ``` # 6.0 # If namenode is in safemode, first use: # hdfs dfsadmin -safemode leave kdf = ks.DataFrame( { 'a': [1, 2, 3, 4, 5, 6], 'b': [100, 200, 300, 400, 500, 600], 'c': ["one", "two", "three", "four", "five", "six"] }, index=[10, 20, 30, 40, 50, 60] ) # 6.1 And show kdf # 6.2 Pandas DataFrame pdf = pd.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']}) # 6.2.1 Transform to koalas DataFrame df = ks.from_pandas(pdf) # 6.3 Rename koalas dataframe columns df.columns = ['x', 'y', 'z1'] # 6.4 Do some operations on koalas DF, in place: df['x2'] = df.x * df.x # 6.6 Finally show koalas df df # 6.7 Read csv file from hadoop # and create koalas df ks.read_csv("/user/ashok/airports.csv").head(10) ################### ```
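The `pyspark.ml` imports in the test cell above (`VectorAssembler`, `LinearRegression`) are never actually exercised. As a minimal sketch (an addition, not part of the original walkthrough), they could be wired together on the toy `airports_df` read back from HDFS, assuming column `d` as the target:

```
# Hypothetical example: fit a linear regression on the toy airports data
assembler = VectorAssembler(inputCols=["a", "b", "c"], outputCol="features")
train_df = assembler.transform(airports_df).select("features", "d")

lr = LinearRegression(featuresCol="features", labelCol="d")
lr_model = lr.fit(train_df)
print(lr_model.coefficients, lr_model.intercept)
```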
# OBJECTF Predire $\rho$, $\sigma_a$ et $\sigma_c$ en fonction de $E_r$, $F_r$, et $T_r$ a droite du domaine en toute temps # PREPARATION ## Les imports ``` %reset -f import matplotlib.pyplot as plt import numpy as np import pandas as pd from ast import literal_eval as l_eval np.set_printoptions(precision = 3) ``` ## Chargement des donnees ``` # """ VERSION COLAB """ # # to load data from my personal github repo (update it if we have to) # import os # if not os.path.exists("assets"): # print("Data wansn't here. Let's download it!") # !git clone https://github.com/desmond-rn/assets.git # else: # print("Data already here. Let's update it!") # %cd assets # # %rm -rf assets # !git pull https://github.com/desmond-rn/assets.git # %cd .. # print("\n") # !ls assets/dataframes/inverse # df_path = "assets/dataframes/inverse/df_temporal.csv" # """ VERSION JUPYTER """ # to load data locally %ls "../../data" df_t_path = "../../data/df_temporal.csv" df_s_path = "../../data/df_spatial.csv" ``` ## Donnees temporelles ``` types = {'rho_expr':str, 'sigma_a_expr':str, 'sigma_c_expr':str, 'E_x_0_expr':str, 'F_x_0_expr':str, 'T_x_0_expr':str} converters={'t':l_eval, 'E_l':l_eval, 'F_l':l_eval, 'T_l':l_eval, 'E_r':l_eval, 'F_r':l_eval, 'T_r':l_eval} # on veut convertir les str en listes df_t = pd.read_csv(df_t_path, thousands=',', dtype=types, converters=converters) df_t.head(2) ``` ## Donnees spatiales ``` types = {'rho_expr':str, 'sigma_a_expr':str, 'sigma_c_expr':str, 'E_x_0_expr':str, 'F_x_0_expr':str, 'T_x_0_expr':str} converters={'x':l_eval, 'rho':l_eval, 'sigma_a':l_eval, 'sigma_c':l_eval, 'E_0':l_eval, 'F_0':l_eval, 'T_0':l_eval, 'E':l_eval, 'F':l_eval, 'T':l_eval} df_s = pd.read_csv(df_s_path, thousands=',', dtype=types, converters=converters) df_s.head(2) ``` ## Prerequis pour cet apprentissage Tous les unputs doivent etre similaires sur un certain nombre de leurs parametres. ``` t_f = 0.005 x_min = 0 x_max = 1 for i in range(len(df_t)): assert df_t.loc[i, 't_f'] == 0.005 assert df_t.loc[i, 'E_0_expr'] == "0.01372*(5^4)" # etc... assert df_t.loc[i, 'x_min'] == x_min assert df_t.loc[i, 'x_max'] == x_max ``` ## Visualisation ``` """ Visualisons les signaux sur la droite et la densite sur le domaine """ def plot_inputs(ax, df_t, index): t = np.array(df_t.loc[index, 't']) # inputs E_r = np.array(df_t.loc[index, 'E_r']) F_r = np.array(df_t.loc[index, 'F_r']) T_r = np.array(df_t.loc[index, 'T_r']) # plot ax[0].plot(t, E_r, 'b', label='énergie à droite', lw=3) ax[0].set_ylim(8.275, 8.875) ax[0].set_xlabel('t') ax[0].legend() ax[1].plot(t, F_r, 'y', label='flux à droite', lw=3) ax[1].set_ylim(-0.25, 0.25) ax[1].set_xlabel('t') ax[1].legend() ax[2].plot(t, T_r, 'r', label='température à droite', lw=3) ax[2].set_ylim(4.96, 5.04) ax[2].set_xlabel('t') ax[2].legend() def plot_output(ax, df_s, index): x = np.array(df_s.loc[index, 'x']) rho = np.array(df_s.loc[index, 'rho']) # plot ax.plot(x, rho, 'm--', label='densité') ax.set_ylim(0.5, 10.5) ax.set_xlabel('x') ax.legend() def plot_io(index): fig, ax = plt.subplots(2, 3, figsize=(12, 6)) fig.delaxes(ax[1][0]) fig.delaxes(ax[1][2]) plot_inputs(ax[0], df_t, index) plot_output(ax[1, 1], df_s, index) plt.tight_layout() index = 0 plot_io(index) ``` ## Creation des inputs X Pour chacun des signaux E_r, F_r et T_r, il faut tout d'abord: - Tronquer le signal pour ne ne garder que la fin - Reechantilloner le signal pour ne garder que 20, voir 50 pas de temps ``` """ Permet de couper le debut du signal, parite toujours constante. 
Retourne la fraction de fin """ def trim(input, ratio): len_input = len(input) len_output = int(len_input*ratio) return input[len_input-len_output:] """ Fonction pour extraire n pas d'iterations """ def resample(input, len_output): len_input = len(input) output = [] for i in np.arange(0, len_input, len_input//len_output): output.append(input[i]) return np.array(output)[1:] """ Testons avec un exemple """ t = np.array(df_t.loc[index, 't']) E_r = np.array(df_t.loc[index, 'E_r']) ratio, len_output = 1/2, 20 t = resample(trim(t, ratio), len_output) E_r = resample(trim(E_r, ratio), len_output) fig, ax = plt.subplots(1, 1, figsize=(6, 4)) ax.plot(t, E_r, 'b', label='énergie à droite coupé et reechantilloné', lw=3) ax.set_ylim(8.275, 8.875) ax.set_xlabel('t') ax.legend(); """ Generation les inputs X """ size = len(df_t) X = np.empty(shape=(size, 3, len_output), dtype=float) for i in range(size): X[i][0] = resample(trim(df_t.loc[i, 'E_r'], ratio), len_output) X[i][1] = resample(trim(df_t.loc[i, 'F_r'], ratio), len_output) X[i][2] = resample(trim(df_t.loc[i, 'T_r'], ratio), len_output) print("X shape =", X.shape) ``` ## Creations des outputs y Pour le signal rho, il faut tout d'abord: - Detecter la position, la hauteur et la larrgeur de chaque crenau ``` """ Calcule les decalages a droite et a gauche d'un signal """ def decay(signal): signal_right = np.zeros_like(signal) signal_right[1:] = signal[:-1] signal_right[0] = signal[0] signal_left = np.zeros_like(signal) signal_left[:-1] = signal[1:] signal_left[-1] = signal[-1] return signal_left, signal_right """ Fonction de lissage laplacien 3-means d'un signal """ def smooth(signal): signal_left, signal_right = decay(signal) return (signal + signal_left + signal_right) / 3. """ Pour eliminer les tres tres faibles valeurs dans un signal """ def sharpen(signal, precision): return np.where(abs(signal) < precision, np.zeros_like(signal), signal) """ Pour afficher un signal et sa derivee seconde """ def plot_signal(ax, signal): signal_left, signal_right = decay(signal) diff = -2*signal + signal_right + signal_left diff = sharpen(diff, 1e-4) ax[0].plot(signal, 'm--', label='signal') ax[1].plot(diff[1:-1], 'c--', label='derivee seconde du signal'); ax[0].legend() ax[1].legend() """ Une fonction pour detecter la position, hauteur et largeur des crenaux """ def detect_niches(signal): signal_left, signal_right = decay(signal) diff = -2*signal + signal_right + signal_left diff = sharpen(diff, 1e-4) # zero_crossings = [] # les points de traverse du 0 niches = [] # les crenaux detectes prev = diff[0] next = diff[2] ended = False # indique si on aretrouve la fin d'un crenau start = 1 end = 1 step = 1 # pas de recherche i = step len_signal = len(diff) while i < len_signal-step: prev = diff[i-step] val = diff[i] next = diff[i+step] if prev > 0. and next < 0.: # zero_crossings.append(i) start = i ended = False if i == len_signal-step-1 and ended == False: prev = -1. next = 1. if prev < 0. and next > 0. and ended==False: # zero_crossings.append(i) end = i niche_width = end - start # largeur relative a N = len_signal niche_center = (end + start) // 2 # position relative a N niche_height = signal[niche_center] # hauteur du crenaux niches.append((niche_center, niche_height, niche_width)) ended = True # print(i, ended) # print(prev, next) i += 1 return niches """ Testons avec un exemple """ signal = np.zeros(500) signal[100:170] = 5. # ajout des crenaux signal[250:265] = 3. signal[325:375] = 10. 
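# (added note, not in the original) The synthetic signal above mimics rho: three
# plateaus ("creneaux") of different heights and widths. The smoothing loop below
# rounds their edges so that detect_niches, which looks for sign changes of the
# sharpened second derivative, sees one clean pair of zero crossings per plateau.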
for i in range(25): # lissage du signal signal = smooth(signal) fig, ax = plt.subplots(1, 2, figsize=(12, 4)) plot_signal(ax, signal) niches = detect_niches(signal) print("Position, hauteur et largeur des creneaux detectes") for el in niches: print(" -", el) """ Testons sur un vrai rho """ # signal = np.array(df_s.loc[4, 'rho']) # fig, ax = plt.subplots(1, 2, figsize=(12, 4)) # plot_signal(ax, signal) # niches = detect_niches(signal) # for el in niches: # print(" -", el) """ Pour creer les y, il faut normaliser par rapport a l'abcisse du domaine """ y = np.empty(shape=(size, 3), dtype=float) for i in range(size): x = np.array(df_s.loc[i, 'x']) rho = np.array(df_s.loc[i, 'rho']) niche = detect_niches(rho)[0] # on suppose qu'il ny a qu'un seul créneau dx = (x_max - x_min) / df_s.loc[i, 'N'] # xmin = 0, xmax = 1 bien sur. condition necessaire pour cette etude y[i][0] = x[niche[0]] # position relative a x y[i][1] = niche[1] # hauteur y[i][2] = niche[2]*dx # largeur # print(i, niche) # print(i, y[i]) print("y shape =", np.shape(y)) ``` ## Separation des donnees train, test et val ``` len_train, len_val = 60, 20 X_train = X[:len_train] X_val = X[len_train:len_train+len_val] X_test = X[len_train+len_val:] y_train = y[:len_train] y_val = y[len_train:len_train+len_val] y_test = y[len_train+len_val:] print("X shapes =", np.shape(X_train), np.shape(X_val), np.shape(X_test)) print("y shapes =", np.shape(y_train), np.shape(y_val), np.shape(y_test)) ``` # APPRENTISSAGE
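The learning step itself is not written out in this section. As one possible baseline (an assumption, not the original author's model), the flattened right-boundary signals can be fed to a multi-output regressor that predicts the position, height and width of the niche:

```
# Hypothetical baseline: flatten the (3, 20) signals and fit a multi-output regressor
from sklearn.ensemble import RandomForestRegressor

X_train_flat = X_train.reshape(len(X_train), -1)
X_val_flat = X_val.reshape(len(X_val), -1)

reg = RandomForestRegressor(n_estimators=200, random_state=0)
reg.fit(X_train_flat, y_train)   # y_train has shape (len_train, 3)
print("validation R^2 =", reg.score(X_val_flat, y_val))
```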
## Phase 3 - deployment

#### This notebook provides an overview of how to deploy the model and predict the CPE in two ways
- The model was built/exported in the last notebook (Phase_2_Advanced_Analytics__predictions) <br> This notebook shows another option to save/export the model using the H2O Flow UI and complements that information with deployment for predictions.

The predictions will be presented in 2 ways
- Batch process
- Online / real-time predictions

<div class="alert alert-block alert-info"> <b>Export model:</b> Export the GBM model (best performance) using the H2O Flow UI as detailed below </div>

```
from IPython.display import Image
Image(filename='./data/H2O-FLOW-UI-GBM-MODEL.PNG')

from IPython.display import Image
Image(filename='./data/H2O-FLOW-UI-GBM-MODEL-download.PNG')
```

## Sample of new campaigns to be predicted

```
import pandas as pd

df = pd.read_csv('./GBM_MODEL/New_campaings_for_predictions.csv')
df.tail(10)
```

### Important points to note
- All information is provided for prediction (the base information available in the simulated/demo data); however, only the relevant features were used during the model build detailed in the notebook Phase_2_Advanced_Analytics__predictions <br>
- For example, LineItemsID is just an index number, carries no relevant information, and is not used for prediction

<div class="alert alert-block alert-info"> <b>Batch Prediction:</b> Generate predictions for new data </div>

#### Executing the prediction as presented below does not require a running H2O cluster
##### The process shown below is executed in 2 steps to illustrate it in detail, but in a production environment it should be executed in a single step
###### &emsp; Simulation in 2 steps
Step 1. Batch process to run the Java program
<br>Step 2. Python program to link the new data and the predictions with the CPE
<br> &emsp; &emsp; Any programming language can be used to run the prediction and collect the results (such as R, Python, Java, C#, ...)

### Run the batch Java process to generate/score the CPE predictions

```
## To generate predictions (CPE) for new data, just run the command
## EXAMPLE
## java -Xmx4g -XX:ReservedCodeCacheSize=256m -cp <h2o-genmodel.jar_EXPORTED_ABOVE> hex.genmodel.tools.PredictCsv --mojo <GBM_log_CPE_model.zip_EXPORTED_ABOVE> --input INPUT_FILE_FOR_PREDICTION.csv --output OUTPUT_FILE_WITH_PREDICTIONS_FOR_CPE__EXPORT_EXPORT_PREDICTIONS.csv --decimal

## REAL PREDICTION
## java -Xmx4g -XX:ReservedCodeCacheSize=256m -cp h2o-genmodel.jar hex.genmodel.tools.PredictCsv --mojo GBM_log_CPE_model.zip --input New_campaings_for_predictions.csv --output New_campaings_for_predictions__EXPORT_EXPORT_PREDICTIONS.csv --decimal

from IPython.display import Image
Image(filename='./data/Batch-prediction-h2o.PNG')
```

### Synchronize all information - new campaign data and new CPE predictions
- Remember that the prediction was made on a logarithmic scale, so the result now has to be reverted with the exponential function

```
CPE_predictions = pd.read_csv('./GBM_MODEL/New_campaings_for_predictions__EXPORT_EXPORT_PREDICTIONS.csv')
CPE_predictions.tail()

import numpy as np

df['CPE_predition_LOG'] = CPE_predictions['predict']
df['CPE_predition'] = round(np.exp(CPE_predictions['predict']) - 1, 3)
df.tail()
```

<div class="alert alert-block alert-info"> <b>Online prediction:</b> Generate predictions for new data </div>

### The online prediction could be implemented using different architectures, such as
1. A serverless function such as AWS Lambda + API Gateway <br> https://aws.amazon.com/lambda/?nc2=h_ql_prod_fs_lbd
2. A Java program that uses the POJO/MOJO model for online prediction <br> http://docs.h2o.ai/h2o/latest-stable/h2o-docs/productionizing.html#step-2-compile-and-run-the-mojo
3. A microservices architecture using Docker (Python + Flask app + NGINX for load balancing) <br> This could be deployed on-premises or with a cloud container-orchestration service such as GKE (Google Kubernetes Engine) <br> https://cloud.google.com/kubernetes-engine/

The solution presented below shows a prediction made by passing a JSON payload to the URL <br> &emsp; This API could be deployed with any of the 3 options detailed above (a minimal Flask sketch of option 3 follows at the end of this notebook)

```
from IPython.display import Image
Image(filename='./data/Online-Prediction.PNG')
```

## Summary and final considerations
##### The model built in Phase 2 and also exported in this notebook can be deployed for batch and online predictions
- Batch process => the way to go for predicting a large number of campaigns and for back-office analysis with BI tools
- Online prediction => using a microservices architecture, for example, is the way to go if the company has online interfaces integrated with campaign-launch programs. With this approach it is possible to analyse the prediction for a specific campaign
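As a complement to the options above, here is a minimal sketch of option 3 (an illustration only, not part of the original project; paths and input field handling are assumptions): a small Flask service that scores the exported MOJO with h2o's `mojo_predict_pandas` helper, which shells out to the same genmodel jar used in the batch step, and then reverts the log transform exactly as in the batch cell:

```
# Hypothetical Flask micro-service wrapping the exported GBM MOJO
import h2o
import numpy as np
import pandas as pd
from flask import Flask, request, jsonify

app = Flask(__name__)

@app.route("/predict", methods=["POST"])
def predict_cpe():
    # one new campaign arrives as a JSON object with the model's input columns
    row = pd.DataFrame([request.get_json()])
    pred = h2o.mojo_predict_pandas(
        dataframe=row,
        mojo_zip_path="./GBM_MODEL/GBM_log_CPE_model.zip",   # assumed path
        genmodel_jar_path="./GBM_MODEL/h2o-genmodel.jar",    # assumed path
    )
    # the model predicts log(CPE + 1), so invert the transform as in the batch step
    cpe = float(np.exp(pred["predict"].iloc[0]) - 1)
    return jsonify({"CPE_prediction": round(cpe, 3)})

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000)
```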
# Comprehensive Example ``` # Enabling the `widget` backend. # This requires jupyter-matplotlib a.k.a. ipympl. # ipympl can be install via pip or conda. %matplotlib widget import matplotlib.pyplot as plt import numpy as np # Testing matplotlib interactions with a simple plot fig = plt.figure() plt.plot(np.sin(np.linspace(0, 20, 100))); # Always hide the toolbar fig.canvas.toolbar_visible = False # Put it back to its default fig.canvas.toolbar_visible = 'fade-in-fade-out' # Change the toolbar position fig.canvas.toolbar_position = 'top' # Hide the Figure name at the top of the figure fig.canvas.header_visible = False # Hide the footer fig.canvas.footer_visible = False # Disable the resizing feature fig.canvas.resizable = False # If true then scrolling while the mouse is over the canvas will not move the entire notebook fig.canvas.capture_scroll = True ``` You can also call `display` on `fig.canvas` to display the interactive plot anywhere in the notebooke ``` fig.canvas.toolbar_visible = True display(fig.canvas) ``` Or you can `display(fig)` to embed the current plot as a png ``` display(fig) ``` # 3D plotting ``` from mpl_toolkits.mplot3d import axes3d fig = plt.figure() ax = fig.add_subplot(111, projection='3d') # Grab some test data. X, Y, Z = axes3d.get_test_data(0.05) # Plot a basic wireframe. ax.plot_wireframe(X, Y, Z, rstride=10, cstride=10) plt.show() ``` # Subplots ``` # A more complex example from the matplotlib gallery np.random.seed(0) n_bins = 10 x = np.random.randn(1000, 3) fig, axes = plt.subplots(nrows=2, ncols=2) ax0, ax1, ax2, ax3 = axes.flatten() colors = ['red', 'tan', 'lime'] ax0.hist(x, n_bins, density=1, histtype='bar', color=colors, label=colors) ax0.legend(prop={'size': 10}) ax0.set_title('bars with legend') ax1.hist(x, n_bins, density=1, histtype='bar', stacked=True) ax1.set_title('stacked bar') ax2.hist(x, n_bins, histtype='step', stacked=True, fill=False) ax2.set_title('stack step (unfilled)') # Make a multiple-histogram of data-sets with different length. x_multi = [np.random.randn(n) for n in [10000, 5000, 2000]] ax3.hist(x_multi, n_bins, histtype='bar') ax3.set_title('different sample sizes') fig.tight_layout() plt.show() fig.canvas.toolbar_position = 'right' fig.canvas.toolbar_visible = False ``` # Interactions with other widgets and layouting When you want to embed the figure into a layout of other widgets you should call `plt.ioff()` before creating the figure otherwise `plt.figure()` will trigger a display of the canvas automatically and outside of your layout. ### Without using `ioff` Here we will end up with the figure being displayed twice. The button won't do anything it just placed as an example of layouting. ``` import ipywidgets as widgets # ensure we are interactive mode # this is default but if this notebook is executed out of order it may have been turned off plt.ion() fig = plt.figure() ax = fig.gca() ax.imshow(Z) widgets.AppLayout( center=fig.canvas, footer=widgets.Button(icon='check'), pane_heights=[0, 6, 1] ) ``` ### Fixing the double display with `ioff` If we make sure interactive mode is off when we create the figure then the figure will only display where we want it to. 
There is ongoing work to allow usage of `ioff` as a context manager, see the [ipympl issue](https://github.com/matplotlib/ipympl/issues/220) and the [matplotlib issue](https://github.com/matplotlib/matplotlib/issues/17013) ``` plt.ioff() fig = plt.figure() plt.ion() ax = fig.gca() ax.imshow(Z) widgets.AppLayout( center=fig.canvas, footer=widgets.Button(icon='check'), pane_heights=[0, 6, 1] ) ``` # Interacting with other widgets ## Changing a line plot with a slide ``` # When using the `widget` backend from ipympl, # fig.canvas is a proper Jupyter interactive widget, which can be embedded in # an ipywidgets layout. See https://ipywidgets.readthedocs.io/en/stable/examples/Layout%20Templates.html # One can bound figure attributes to other widget values. from ipywidgets import AppLayout, FloatSlider plt.ioff() slider = FloatSlider( orientation='horizontal', description='Factor:', value=1.0, min=0.02, max=2.0 ) slider.layout.margin = '0px 30% 0px 30%' slider.layout.width = '40%' fig = plt.figure() fig.canvas.header_visible = False fig.canvas.layout.min_height = '400px' plt.title('Plotting: y=sin({} * x)'.format(slider.value)) x = np.linspace(0, 20, 500) lines = plt.plot(x, np.sin(slider.value * x)) def update_lines(change): plt.title('Plotting: y=sin({} * x)'.format(change.new)) lines[0].set_data(x, np.sin(change.new * x)) fig.canvas.draw() fig.canvas.flush_events() slider.observe(update_lines, names='value') AppLayout( center=fig.canvas, footer=slider, pane_heights=[0, 6, 1] ) ``` ## Update image data in a performant manner Two useful tricks to improve performance when updating an image displayed with matplolib are to: 1. Use the `set_data` method instead of calling imshow 2. Precompute and then index the array ``` # precomputing all images x = np.linspace(0,np.pi,200) y = np.linspace(0,10,200) X,Y = np.meshgrid(x,y) parameter = np.linspace(-5,5) example_image_stack = np.sin(X)[None,:,:]+np.exp(np.cos(Y[None,:,:]*parameter[:,None,None])) plt.ioff() fig = plt.figure() plt.ion() im = plt.imshow(example_image_stack[0]) def update(change): im.set_data(example_image_stack[change['new']]) fig.canvas.draw_idle() slider = widgets.IntSlider(value=0, min=0, max=len(parameter)-1) slider.observe(update, names='value') widgets.VBox([slider, fig.canvas]) ``` ### Debugging widget updates and matplotlib callbacks If an error is raised in the `update` function then will not always display in the notebook which can make debugging difficult. This same issue is also true for matplotlib callbacks on user events such as mousemovement, for example see [issue](https://github.com/matplotlib/ipympl/issues/116). There are two ways to see the output: 1. In jupyterlab the output will show up in the Log Console (View > Show Log Console) 2. using `ipywidgets.Output` Here is an example of using an `Output` to capture errors in the update function from the previous example. 
To induce errors we changed the slider limits so that out-of-bounds errors will occur:

From: `slider = widgets.IntSlider(value=0, min=0, max=len(parameter)-1)`

To: `slider = widgets.IntSlider(value=0, min=0, max=len(parameter)+10)`

If you move the slider all the way to the right you should see errors from the Output widget

```
plt.ioff()
fig = plt.figure()
plt.ion()

im = plt.imshow(example_image_stack[0])

out = widgets.Output()

@out.capture()
def update(change):
    with out:
        if change['name'] == 'value':
            im.set_data(example_image_stack[change['new']])
            fig.canvas.draw_idle()

slider = widgets.IntSlider(value=0, min=0, max=len(parameter)+10)
slider.observe(update)
display(widgets.VBox([slider, fig.canvas]))
display(out)
```
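The same capture trick applies to matplotlib's own event callbacks (the mouse-movement case mentioned above). A small sketch, reusing the figure and `widgets` from the previous cell:

```
# Hypothetical example: route prints/errors from a mouse-move callback into an Output widget
out2 = widgets.Output()

@out2.capture()
def on_move(event):
    if event.inaxes is not None:
        print(f"x={event.xdata:.2f}, y={event.ydata:.2f}")

cid = fig.canvas.mpl_connect('motion_notify_event', on_move)
display(out2)
```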
<a href="https://colab.research.google.com/github/jdz014/DS-Unit-2-Applied-Modeling/blob/master/module2-wrangle-ml-datasets/LS_DS12_232_assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> Lambda School Data Science *Unit 2, Sprint 3, Module 1* --- # Wrangle ML datasets - [ ] Continue to clean and explore your data. - [ ] For the evaluation metric you chose, what score would you get just by guessing? - [ ] Can you make a fast, first model that beats guessing? **We recommend that you use your portfolio project dataset for all assignments this sprint.** **But if you aren't ready yet, or you want more practice, then use the New York City property sales dataset for today's assignment.** Follow the instructions below, to just keep a subset for the Tribeca neighborhood, and remove outliers or dirty data. [Here's a video walkthrough](https://youtu.be/pPWFw8UtBVg?t=584) you can refer to if you get stuck or want hints! - Data Source: [NYC OpenData: NYC Citywide Rolling Calendar Sales](https://data.cityofnewyork.us/dataset/NYC-Citywide-Rolling-Calendar-Sales/usep-8jbt) - Glossary: [NYC Department of Finance: Rolling Sales Data](https://www1.nyc.gov/site/finance/taxes/property-rolling-sales-data.page) Your code starts here: ``` !wget 'https://raw.githubusercontent.com/washingtonpost/data-school-shootings/master/school-shootings-data.csv' import pandas as pd df = pd.read_csv('school-shootings-data.csv') print(df.shape) df.head() # Replace shooting type with 'other' for rows not 'targeted' or 'indiscriminate' df['shooting_type'] = df['shooting_type'].replace(['accidental', 'unclear', 'targeted and indiscriminate', 'public suicide', 'hostage suicide', 'accidental or targeted', 'public suicide (attempted)'], 'other') # Fill missing value with 'other' df['shooting_type'] = df['shooting_type'].fillna('other') # Majority class baseline 59% df['shooting_type'].value_counts(normalize=True) from sklearn.model_selection import train_test_split # Create train, test train, test = train_test_split(df, train_size=0.80, random_state=21, stratify=df['shooting_type']) train.shape, test.shape def wrangle(df): # Avoid SettingWithCopyWarning df = df.copy() # Remove commas from numbers df['white'] = df['white'].str.replace(",", "") # Change from object to int df['white'] = pd.to_numeric(df['white']) # Remove commas from numbers df['enrollment'] = df['enrollment'].str.replace(",", "") # Change from object to int df['enrollment'] = pd.to_numeric(df['enrollment']) # Fill missing values for these specific columns df.fillna({'white': 0, 'black': 0, 'hispanic': 0, 'asian': 0, 'american_indian_alaska_native': 0, 'hawaiian_native_pacific_islander': 0, 'two_or_more': 0, 'district_name': 'Unknown', 'time': '12:00 PM', 'lat': 33.612910, 'long': -86.682000, 'staffing': 60.42, 'low_grade': '9', 'high_grade': '12'}, inplace=True) # Drop columns with 200+ missing values df = df.drop(columns=['deceased_notes1', 'age_shooter2', 'gender_shooter2', 'race_ethnicity_shooter2', 'shooter_relationship2', 'shooter_deceased2', 'deceased_notes2']) # Drop unusable variance df = df.drop(columns=['uid', 'nces_school_id', 'nces_district_id', 'weapon', 'weapon_source', 'state_fips', 'county_fips', 'ulocale', 'lunch', 'age_shooter1', 'gender_shooter1', 'race_ethnicity_shooter1', 'shooter_relationship1', 'shooter_deceased1']) # Change date to datettime df['date'] = pd.to_datetime(df['date']) return df train = wrangle(train) test = wrangle(test) train.shape, test.shape !pip install 
category_encoders==2.* import category_encoders as ce from sklearn.impute import SimpleImputer from sklearn.model_selection import cross_val_score from sklearn.pipeline import make_pipeline from sklearn.ensemble import RandomForestClassifier from sklearn.preprocessing import StandardScaler from sklearn.feature_selection import f_classif, SelectKBest from sklearn.linear_model import Ridge target = 'shooting_type' features = train.columns.drop([target, 'date']) X_train = train[features] y_train = train[target] X_test = test[features] y_test = test[target] pipeline = make_pipeline( ce.OrdinalEncoder(), StandardScaler(), RandomForestClassifier(n_estimators=100, n_jobs=-1, random_state=21) ) k = 20 scores = cross_val_score(pipeline, X_train, y_train, cv=k) print(f'MAE for {k} folds:', scores) scores.mean() from sklearn.tree import DecisionTreeClassifier target = 'shooting_type' features = train.columns.drop([target, 'date', ]) X_train = train[features] y_train = train[target] X_test = test[features] y_test = test[target] pipeline = make_pipeline( ce.OrdinalEncoder(), DecisionTreeClassifier(max_depth=3) ) pipeline.fit(X_train, y_train) print('Test Accuracy:', pipeline.score(X_test, y_test)) ```
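The assignment asks what score guessing would get and whether a fast first model beats it. A quick check (an addition, not part of the original solution) is to score a majority-class dummy model on the same test split and compare it with the pipeline accuracies above:

```
# Hypothetical baseline check: majority-class "guessing" on the test set
from sklearn.dummy import DummyClassifier

baseline = DummyClassifier(strategy='most_frequent')
baseline.fit(X_train, y_train)
print('Majority-class baseline accuracy:', baseline.score(X_test, y_test))
```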