# Movie Recommender System

```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import ast
%matplotlib inline

movies = pd.read_csv('tmdb_5000_movies.csv')
credits = pd.read_csv('tmdb_5000_credits.csv')
movies.head()
credits.head()

movies = movies.merge(credits, on='title')
movies.shape

movies = movies[['movie_id','title','overview','genres','keywords','cast','crew']]
movies.head()
movies.isnull().sum()
movies.dropna(inplace=True)
movies.duplicated().sum()

def get_genres(x):
    genre = []
    for i in ast.literal_eval(x):
        genre.append(i['name'])
    return genre

movies['genres'] = movies['genres'].apply(lambda x: get_genres(x))
movies['keywords'] = movies['keywords'].apply(lambda x: get_genres(x))
movies.head(1)

def get_actor(x):
    actor_name = []
    counter = 0
    for i in ast.literal_eval(x):
        if counter != 3:
            actor_name.append(i['name'])
            counter += 1
        else:
            break
    return actor_name

movies['cast'] = movies['cast'].apply(lambda x: get_actor(x))
movies.head(1)

def get_director(x):
    director_name = []
    for i in ast.literal_eval(x):
        if i['job'] == 'Director':
            director_name.append(i['name'])
    return director_name

movies['crew'] = movies['crew'].apply(lambda x: get_director(x))
movies.head()

movies['overview'] = movies['overview'].apply(lambda x: x.split())
movies

movies['genres'] = movies['genres'].apply(lambda x: [i.replace(' ','') for i in x])
movies['keywords'] = movies['keywords'].apply(lambda x: [i.replace(' ','') for i in x])
movies['cast'] = movies['cast'].apply(lambda x: [i.replace(' ','') for i in x])
movies['crew'] = movies['crew'].apply(lambda x: [i.replace(' ','') for i in x])
movies.head()

movies['tags'] = movies['overview'] + movies['genres'] + movies['keywords'] + movies['cast'] + movies['crew']
df = movies[['movie_id','title','tags']]
df

df['tags'] = df['tags'].apply(lambda x: ' '.join(x))
df['tags'] = df['tags'].apply(lambda x: x.lower())

import nltk
from nltk.stem.porter import PorterStemmer
ps = PorterStemmer()

def stem(x):
    stem_words = []
    for i in x.split():
        stem_words.append(ps.stem(i))
    return ' '.join(stem_words)

df['tags'] = df['tags'].apply(lambda x: stem(x))

from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(max_features=5000, stop_words='english')
cv.fit_transform(df['tags']).toarray().shape
vectors = cv.fit_transform(df['tags']).toarray()
cv.get_feature_names()

from sklearn.metrics.pairwise import cosine_similarity
similarity = cosine_similarity(vectors)
similarity

distances = sorted(enumerate(similarity[0]), reverse=True, key=lambda x: x[1])[1:6]
distances

def recommend(movie):
    movie_index = df[df['title'] == movie].index[0]
    distances = similarity[movie_index]
    movie_list = sorted(enumerate(distances), reverse=True, key=lambda x: x[1])[1:6]
    for i in movie_list:
        print(df['title'][i[0]])

recommend('Avatar')

import pickle
pickle.dump(df, open('movies.pkl','wb'))
pickle.dump(similarity, open('similarity.pkl','wb'))
```
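The pickled dataframe and similarity matrix are all a lightweight serving script needs. The sketch below is an assumption about how those artifacts might be consumed elsewhere (for example in a small web app); it is not part of the original notebook. It reloads `movies.pkl` and `similarity.pkl` and reimplements `recommend` with purely positional indexing, which stays correct even if `dropna` left gaps in the dataframe index.

```
# Minimal serving sketch: assumes 'movies.pkl' and 'similarity.pkl' were written
# by the notebook above and sit in the current working directory.
import pickle
import numpy as np

df = pickle.load(open('movies.pkl', 'rb'))              # columns: movie_id, title, tags
similarity = pickle.load(open('similarity.pkl', 'rb'))  # (n_movies, n_movies) cosine matrix

def recommend(movie, top_n=5):
    """Return the titles of the top_n movies most similar to `movie`."""
    positions = np.flatnonzero(df['title'].values == movie)
    if len(positions) == 0:
        raise ValueError(f"'{movie}' not found in the dataset")
    idx = int(positions[0])
    # Rank by cosine similarity; skip the first entry, which is the movie itself
    ranked = sorted(enumerate(similarity[idx]), key=lambda x: x[1], reverse=True)[1:top_n + 1]
    return [df['title'].iloc[i] for i, _ in ranked]

print(recommend('Avatar'))
```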
github_jupyter
``` !pip install transformers # generics import pandas as pd import numpy as np from tqdm import tqdm import re from collections import defaultdict import matplotlib.pyplot as plt import random !pip install pytypo import pytypo from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import train_test_split import torch import torch.nn as nn from torch.utils.data import DataLoader, Dataset from sklearn.metrics import confusion_matrix, classification_report, f1_score import torch.nn.functional as F from transformers import BertTokenizer, AutoModel, BertConfig, TFBertModel, AdamW, get_linear_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, BertConfig, get_constant_schedule_with_warmup # import warnings # warnings.filterwarnings('FutureWarning') device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu") device from google.colab import drive drive.mount('/content/drive') def set_seed(seed_value=42): """Set seed for reproducibility. """ random.seed(seed_value) np.random.seed(seed_value) torch.manual_seed(seed_value) torch.cuda.manual_seed_all(seed_value) set_seed(29092020) tokenizer = BertTokenizer.from_pretrained("indobenchmark/indobert-base-p1") model = AutoModel.from_pretrained("indobenchmark/indobert-base-p1") # df = pd.read_excel('./drive/My Drive/satdat/dataset.xlsx') df_train = pd.read_csv('./drive/My Drive/satdat/train.csv') df_val = pd.read_csv('./drive/My Drive/satdat/val.csv') test = pd.read_csv('./drive/My Drive/satdat/datatest_labelled.csv') # df_train, df_val = train_test_split(df, test_size=0.1, random_state=42) # df_train.to_csv("./drive/My Drive/satdat/b_train.csv") # df_val.to_csv('./drive/My Drive/satdat/b_val.csv') def clean(text) : text_cleaning_re = "@\S+|https?:\S+|http?:\S|[#]+|[^A-Za-z0-9]+" text_cleaning_hash = "#[A-Za-z0-9]+" text_cleaning_num = "(^|\W)\d+" text = re.sub(text_cleaning_hash, " ", text).strip() text = re.sub(text_cleaning_num, " ", text).strip() text = re.sub(text_cleaning_re, " ", text).strip() text = text.strip() out = [] for word in text.split() : # try : # out.append(word.replace(word, slang[word])) # except Exception as e : out.append(word) return pytypo.correct_sentence(" ".join(out).strip()) slang = pd.read_csv('./drive/My Drive/satdat/slang.csv') slang = slang[['slang', 'formal']] slang = slang.set_index('slang')['formal'].to_dict() df_train.narasi = df_train.narasi.apply(lambda x: clean(x)) df_train.judul = df_train.judul.apply(lambda x: clean(x)) df_val.narasi = df_val.narasi.apply(lambda x: clean(x)) df_val.judul = df_val.judul.apply(lambda x: clean(x)) test.narasi = test.narasi.apply(lambda x: clean(x)) test.judul = test.judul.apply(lambda x: clean(x)) class HoaxDataset(Dataset) : def __init__(self, feature1, feature2, label, tokenizer, max_len, no_label=False) : self.feature1 = feature1 self.feature2 = feature2 self.label = label self.tokenizer = tokenizer self.max_len = max_len self.no_label = no_label def __len__(self) : return len(self.feature1) def __getitem__(self, item) : feature1 = str(self.feature1[item]) feature2 = str(self.feature2[item]) if not self.no_label: label = self.label[item] encoding1 = tokenizer.encode_plus( # ntar diganti <---------------------------------------------------- feature1, max_length=64, add_special_tokens=True, return_token_type_ids=False, return_attention_mask=True, truncation=True, pad_to_max_length=True, return_tensors='pt' ) encoding2 = tokenizer.encode_plus( feature2, max_length=32, add_special_tokens=True, return_token_type_ids=False, 
return_attention_mask=True, truncation=True, pad_to_max_length=True, return_tensors='pt' ) if not self.no_label : return { 'narasi_text' : feature1, 'narasi_input_ids' : encoding1['input_ids'].flatten(), 'narasi_attention_mask' : encoding1['attention_mask'].flatten(), 'judul_narasi_text' : feature2, 'judul_input_ids' : encoding2['input_ids'].flatten(), 'judul_attention_mask' : encoding2['attention_mask'].flatten(), 'label' : torch.tensor(label, dtype=torch.long) } else : return { 'narasi_text' : feature1, 'narasi_input_ids' : encoding1['input_ids'].flatten(), 'narasi_attention_mask' : encoding1['attention_mask'].flatten(), 'judul_narasi_text' : feature2, 'judul_input_ids' : encoding2['input_ids'].flatten(), 'judul_attention_mask' : encoding2['attention_mask'].flatten(), } def to_data_loader(df, columns, label, tokenizer, max_len, batch_size) : ds = HoaxDataset( df[columns[0]], df[columns[1]], df[label], tokenizer=tokenizer, max_len=max_len, ) return DataLoader( ds, batch_size=batch_size, ) def test_to_data_loader(df, columns, tokenizer, max_len, batch_size) : ds = HoaxDataset( df[columns[0]], df[columns[1]], None, tokenizer=tokenizer, max_len=max_len, no_label=True ) return DataLoader( ds, batch_size=batch_size, ) train_data_loader = to_data_loader(df_train, ['narasi', 'judul'], 'label', tokenizer, 64, 32) val_data_loader = to_data_loader(df_val, ['narasi', 'judul'], 'label', tokenizer, 64, 32) test_data_loader = test_to_data_loader(test, ['narasi', 'judul'], tokenizer, 64, 32) data = next(iter(test_data_loader)) data.keys() class HoaxClassifier(nn.Module) : def __init__(self, n_classes) : super(HoaxClassifier, self).__init__() config = BertConfig.from_pretrained('indobenchmark/indobert-base-p1') self.bert1 = AutoModel.from_pretrained("indobenchmark/indobert-base-p1", config=config) self.bert2 = AutoModel.from_pretrained("indobenchmark/indobert-base-p1", config=config) self.drop = nn.Dropout(p=0.5) self.relu = nn.ReLU() self.tanh = nn.Tanh() self.dual_bert = nn.Linear(self.bert1.config.hidden_size * 2, 32) self.out = nn.Linear(32, 2) def forward(self, narasi_input_ids, narasi_attention_mask, judul_input_ids, judul_attention_mask) : _, pooled_output1 = self.bert1( input_ids = narasi_input_ids, attention_mask = narasi_attention_mask ) _, pooled_output2 = self.bert2( input_ids = judul_input_ids, attention_mask = judul_attention_mask ) x = torch.cat((pooled_output1, pooled_output2), dim=1) x = self.drop(x) x = self.dual_bert(x) x = self.tanh(x) x = self.drop(x) x = self.out(x) return x model = HoaxClassifier(2) model.to(device) # load freezed only if already exist # model.load_state_dict(torch.load('/content/drive/My Drive/satdat/freezed_state.bin')) # toggle to train non embeddings model.bert1.embeddings.requires_grad_=True model.bert2.embeddings.requires_grad_=True EPOCHS = 8 opt = AdamW(model.parameters(), lr=3e-5, correct_bias=False, weight_decay=1e-4) total_steps = len(train_data_loader) * EPOCHS scheduler = get_constant_schedule_with_warmup( opt, num_warmup_steps=0, # num_training_steps=total_steps, ) loss_function = nn.CrossEntropyLoss().to(device) def train_epoch (model, data_loader, loss_fn, optimizer, device, scheduler, n_examples) : model = model.train() correct_predictions = 0 losses = [] for d in data_loader : input_ids1 = d['narasi_input_ids'].to(device) input_ids2 = d['judul_input_ids'].to(device) input_mask1 = d['narasi_attention_mask'].to(device) input_mask2 = d['judul_attention_mask'].to(device) label = d['label'].to(device) outputs = model( input_ids1, input_mask1, 
input_ids2, input_mask2 ) _, preds = torch.max(outputs, dim=1) loss = loss_fn(outputs, label) correct_predictions += torch.sum(preds == label) losses.append(loss.item()) loss.backward() nn.utils.clip_grad_norm(model.parameters(), max_norm=1.0) optimizer.step() scheduler.step() optimizer.zero_grad() return correct_predictions.double() / n_examples, np.mean(losses) def eval_model(model, data_loader, loss_fn, device, n_examples) : model = model.eval() losses = [] correct_predictions=0 with torch.no_grad() : for d in data_loader : input_ids1 = d['narasi_input_ids'].to(device) input_ids2 = d['judul_input_ids'].to(device) input_mask1 = d['narasi_attention_mask'].to(device) input_mask2 = d['judul_attention_mask'].to(device) label = d['label'].to(device) outputs = model( input_ids1, input_mask1, input_ids2, input_mask2 ) _, preds = torch.max(outputs, dim=1) loss = loss_fn(outputs, label) correct_predictions += torch.sum(preds == label) losses.append(loss.item()) return correct_predictions.double() / n_examples, np.mean(losses) %%time history = defaultdict(list) best_accuracy = 0 for epoch in range(EPOCHS): print(f'Epoch {epoch + 1}/{EPOCHS}') print('-' * 10) train_acc, train_loss = train_epoch( model, train_data_loader, loss_function, opt, device, scheduler, len(df_train) ) print(f'Train loss {train_loss} accuracy {train_acc}') val_acc, val_loss = eval_model( model, val_data_loader, loss_function, device, len(df_val) ) print(f'Val loss {val_loss} accuracy {val_acc}') print() history['train_acc'].append(train_acc) history['train_loss'].append(train_loss) history['val_acc'].append(val_acc) history['val_loss'].append(val_loss) if val_acc > best_accuracy: torch.save(model.state_dict(), 'best_model.bin') best_accuracy = val_acc def get_predictions(model, data_loader): model = model.eval() predictions = [] prediction_probs = [] with torch.no_grad(): for d in data_loader: input_ids1 = d['narasi_input_ids'].to(device) input_ids2 = d['judul_input_ids'].to(device) input_mask1 = d['narasi_attention_mask'].to(device) input_mask2 = d['judul_attention_mask'].to(device) outputs = model( input_ids1, input_mask1, input_ids2, input_mask2 ) _, preds = torch.max(outputs, dim=1) probs = F.softmax(outputs, dim=1) predictions.extend(preds) prediction_probs.extend(probs) predictions = torch.stack(predictions).cpu() prediction_probs = torch.stack(prediction_probs).cpu() return predictions, prediction_probs # load best model model.load_state_dict(torch.load('./best_model.bin')) y_pred, y_pred_probs = get_predictions( model, test_data_loader ) y_pred print(classification_report(list(test['label']), y_pred)) print(f1_score(list(test['label']), y_pred, average='micro')) import itertools from sklearn.metrics import confusion_matrix, classification_report, accuracy_score def plot_confusion_matrix(cm, classes, title='Confusion matrix', cmap=plt.cm.Blues): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. """ # cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] # print(cm) plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title, fontsize=20) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, fontsize=13) plt.yticks(tick_marks, classes, fontsize=13) fmt = '.2f' thresh = cm.max() / 2. 
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.ylabel('True label', fontsize=17) plt.xlabel('Predicted label', fontsize=17) cnf_matrix = confusion_matrix(test.label.to_list(), y_pred) plt.figure(figsize=(6,6)) plot_confusion_matrix(cnf_matrix, classes=['0', '1'], title="Confusion matrix") plt.show() ```
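After evaluating on the labelled test set, it can be handy to score a single (narasi, judul) pair interactively. The helper below is a sketch, not part of the original notebook: it assumes the `model`, `tokenizer`, `clean`, and `device` objects defined above, and the returned integer label follows whatever encoding was used in the training CSVs.

```
# Single-example inference with the trained HoaxClassifier (assumed setup above).
import torch
import torch.nn.functional as F

def predict_single(narasi, judul):
    model.eval()
    enc1 = tokenizer.encode_plus(
        clean(narasi), max_length=64, add_special_tokens=True,
        truncation=True, padding='max_length', return_tensors='pt')
    enc2 = tokenizer.encode_plus(
        clean(judul), max_length=32, add_special_tokens=True,
        truncation=True, padding='max_length', return_tensors='pt')
    with torch.no_grad():
        logits = model(
            enc1['input_ids'].to(device), enc1['attention_mask'].to(device),
            enc2['input_ids'].to(device), enc2['attention_mask'].to(device))
        probs = F.softmax(logits, dim=1).squeeze(0)
    return int(torch.argmax(probs).item()), probs.cpu().tolist()

# Hypothetical usage:
# label, probs = predict_single("Narasi berita yang ingin dicek", "Judul berita")
```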
github_jupyter
# Quantization of Image Classification Models This tutorial demostrates how to apply INT8 quantization to Image Classification model using [Post-training Optimization Tool API](../../compression/api/README.md). The Mobilenet V2 model trained on Cifar10 dataset is used as an example. The code of this tutorial is designed to be extandable to custom model and dataset. It is assumed that OpenVINO is already installed. This tutorial consists of the following steps: - Prepare the model for quantization - Define data loading and accuracy validation functionality - Run optimization pipeline - Compare accuracy of the original and quantized models - Compare performance of the original and quantized models - Compare results on one picture ``` import os from pathlib import Path import matplotlib.pyplot as plt import numpy as np import torch from addict import Dict from compression.api import DataLoader, Metric from compression.engines.ie_engine import IEEngine from compression.graph import load_model, save_model from compression.graph.model_utils import compress_model_weights from compression.pipeline.initializer import create_pipeline from openvino.runtime import Core from torchvision import transforms from torchvision.datasets import CIFAR10 # Set the data and model directories DATA_DIR = 'data' MODEL_DIR = 'model' os.makedirs(DATA_DIR, exist_ok=True) os.makedirs(MODEL_DIR, exist_ok=True) ``` ## Prepare the Model Model preparation stage has the following steps: - Download PyTorch model from Torchvision repository - Convert it to ONNX format - Run OpenVINO Model Optimizer tool to convert ONNX to OpenVINO Intermediate Representation (IR) ``` model = torch.hub.load("chenyaofo/pytorch-cifar-models", "cifar10_mobilenetv2_x1_0", pretrained=True) model.eval() dummy_input = torch.randn(1, 3, 32, 32) onnx_model_path = Path(MODEL_DIR) / 'mobilenet_v2.onnx' ir_model_xml = onnx_model_path.with_suffix('.xml') ir_model_bin = onnx_model_path.with_suffix('.bin') torch.onnx.export(model, dummy_input, onnx_model_path, verbose=True) # Run OpenVINO Model Optimization tool to convert ONNX to OpenVINO IR !mo --framework=onnx --data_type=FP16 --input_shape=[1,3,32,32] -m $onnx_model_path --output_dir $MODEL_DIR ``` ## Define Data Loader At this step the `DataLoader` interface from POT API is implemented. ``` transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261))]) dataset = CIFAR10(root=DATA_DIR, train=False, transform=transform, download=True) # create DataLoader from CIFAR10 dataset class CifarDataLoader(DataLoader): def __init__(self, config): """ Initialize config and dataset. :param config: created config with DATA_DIR path. """ if not isinstance(config, Dict): config = Dict(config) super().__init__(config) self.indexes, self.pictures, self.labels = self.load_data(dataset) def __len__(self): return len(self.labels) def __getitem__(self, index): """ Return one sample of index, label and picture. :param index: index of the taken sample. """ if index >= len(self): raise IndexError return (self.indexes[index], self.labels[index]), self.pictures[index].numpy() def load_data(self, dataset): """ Load dataset in needed format. :param dataset: downloaded dataset. 
""" pictures, labels, indexes = [], [], [] for idx, sample in enumerate(dataset): pictures.append(sample[0]) labels.append(sample[1]) indexes.append(idx) return indexes, pictures, labels ``` ## Define Accuracy Metric Calculation At this step the `Metric` interface for accuracy Top-1 metric is implemented. It is used for validating accuracy of quantized model. ``` # Custom implementation of classification accuracy metric. class Accuracy(Metric): # Required methods def __init__(self, top_k=1): super().__init__() self._top_k = top_k self._name = 'accuracy@top{}'.format(self._top_k) self._matches = [] @property def value(self): """ Returns accuracy metric value for the last model output. """ return {self._name: self._matches[-1]} @property def avg_value(self): """ Returns accuracy metric value for all model outputs. """ return {self._name: np.ravel(self._matches).mean()} def update(self, output, target): """ Updates prediction matches. :param output: model output :param target: annotations """ if len(output) > 1: raise Exception('The accuracy metric cannot be calculated ' 'for a model with multiple outputs') if isinstance(target, dict): target = list(target.values()) predictions = np.argsort(output[0], axis=1)[:, -self._top_k:] match = [float(t in predictions[i]) for i, t in enumerate(target)] self._matches.append(match) def reset(self): """ Resets collected matches """ self._matches = [] def get_attributes(self): """ Returns a dictionary of metric attributes {metric_name: {attribute_name: value}}. Required attributes: 'direction': 'higher-better' or 'higher-worse' 'type': metric type """ return {self._name: {'direction': 'higher-better', 'type': 'accuracy'}} ``` ## Run Quantization Pipeline and compare the accuracy of the original and quantized models Here we define a configuration for our quantization pipeline and run it. NOTE: we use built-in `IEEngine` implementation of the `Engine` interface from the POT API for model inference. `IEEngine` is built on top of OpenVINO Python* API for inference and provides basic functionality for inference of simple models. If you have a more complicated inference flow for your model/models you should create your own implementation of `Engine` interface, for example by inheriting from `IEEngine` and extending it. ``` model_config = Dict({ 'model_name': 'mobilenet_v2', 'model': ir_model_xml, 'weights': ir_model_bin }) engine_config = Dict({ 'device': 'CPU', 'stat_requests_number': 2, 'eval_requests_number': 2 }) dataset_config = { 'data_source': DATA_DIR } algorithms = [ { 'name': 'DefaultQuantization', 'params': { 'target_device': 'CPU', 'preset': 'performance', 'stat_subset_size': 300 } } ] # Steps 1-7: Model optimization # Step 1: Load the model. model = load_model(model_config) # Step 2: Initialize the data loader. data_loader = CifarDataLoader(dataset_config) # Step 3 (Optional. Required for AccuracyAwareQuantization): Initialize the metric. metric = Accuracy(top_k=1) # Step 4: Initialize the engine for metric calculation and statistics collection. engine = IEEngine(engine_config, data_loader, metric) # Step 5: Create a pipeline of compression algorithms. pipeline = create_pipeline(algorithms, engine) # Step 6: Execute the pipeline. compressed_model = pipeline.run(model) # Step 7 (Optional): Compress model weights quantized precision # in order to reduce the size of final .bin file. compress_model_weights(compressed_model) # Step 8: Save the compressed model to the desired path. 
compressed_model_paths = save_model(model=compressed_model, save_path=MODEL_DIR, model_name="quantized_mobilenet_v2" ) compressed_model_xml = compressed_model_paths[0]["model"] compressed_model_bin = Path(compressed_model_paths[0]["model"]).with_suffix(".bin") # Step 9: Compare accuracy of the original and quantized models. metric_results = pipeline.evaluate(model) if metric_results: for name, value in metric_results.items(): print(f"Accuracy of the original model: {name}: {value}") metric_results = pipeline.evaluate(compressed_model) if metric_results: for name, value in metric_results.items(): print(f"Accuracy of the optimized model: {name}: {value}") ``` ## Compare Performance of the Original and Quantized Models Finally, we will measure the inference performance of the FP32 and INT8 models. To do this, we use [Benchmark Tool](https://docs.openvinotoolkit.org/latest/openvino_inference_engine_tools_benchmark_tool_README.html) - OpenVINO's inference performance measurement tool. NOTE: For more accurate performance, we recommended running benchmark_app in a terminal/command prompt after closing other applications. Run benchmark_app -m model.xml -d CPU to benchmark async inference on CPU for one minute. Change CPU to GPU to benchmark on GPU. Run benchmark_app --help to see an overview of all command line options. ``` # Inference FP16 model (IR) !benchmark_app -m $ir_model_xml -d CPU -api async # Inference INT8 model (IR) !benchmark_app -m $compressed_model_xml -d CPU -api async ``` ## Compare results on four pictures. ``` ie = Core() # read and load float model float_model = ie.read_model( model=ir_model_xml, weights=ir_model_bin ) float_compiled_model = ie.compile_model(model=float_model, device_name="CPU") # read and load quantized model quantized_model = ie.read_model( model=compressed_model_xml, weights=compressed_model_bin ) quantized_compiled_model = ie.compile_model(model=quantized_model, device_name="CPU") # define all possible labels from CIFAR10 labels_names = ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"] all_pictures = [] all_labels = [] # get all pictures and their labels for i, batch in enumerate(data_loader): all_pictures.append(batch[1]) all_labels.append(batch[0][1]) def plot_pictures(indexes: list, all_pictures=all_pictures, all_labels=all_labels): """Plot 4 pictures. :param indexes: a list of indexes of pictures to be displayed. :param all_batches: batches with pictures. """ images, labels = [], [] num_pics = len(indexes) assert num_pics == 4, f'No enough indexes for pictures to be displayed, got {num_pics}' for idx in indexes: assert idx < 10000, 'Cannot get such index, there are only 10000' pic = np.rollaxis(all_pictures[idx].squeeze(), 0, 3) images.append(pic) labels.append(labels_names[all_labels[idx]]) f, axarr = plt.subplots(1, 4) axarr[0].imshow(images[0]) axarr[0].set_title(labels[0]) axarr[1].imshow(images[1]) axarr[1].set_title(labels[1]) axarr[2].imshow(images[2]) axarr[2].set_title(labels[2]) axarr[3].imshow(images[3]) axarr[3].set_title(labels[3]) def infer_on_pictures(model, indexes: list, all_pictures=all_pictures): """ Inference model on a few pictures. 
:param net: model on which do inference :param indexes: list of indexes """ predicted_labels = [] request = model.create_infer_request() for idx in indexes: assert idx < 10000, 'Cannot get such index, there are only 10000' request.infer(inputs={'input.1': all_pictures[idx][None,]}) result = request.get_output_tensor(0).data result = labels_names[np.argmax(result[0])] predicted_labels.append(result) return predicted_labels indexes_to_infer = [7, 12, 15, 20] # to plot specify 4 indexes plot_pictures(indexes_to_infer) results_float = infer_on_pictures(float_compiled_model, indexes_to_infer) results_quanized = infer_on_pictures(quantized_compiled_model, indexes_to_infer) print(f"Labels for picture from float model : {results_float}.") print(f"Labels for picture from quantized model : {results_quanized}.") ```
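Besides latency and accuracy, the size of the model on disk is another quick way to see the effect of quantization. The cell below is an optional sanity check, not part of the original tutorial; it only assumes the IR paths defined earlier in this notebook.

```
# Compare the on-disk size of the FP16 IR and the quantized INT8 IR.
from pathlib import Path

def ir_size_mb(xml_path, bin_path):
    """Total size of an IR's .xml + .bin files in megabytes."""
    return (Path(xml_path).stat().st_size + Path(bin_path).stat().st_size) / 2 ** 20

print(f"FP16 IR: {ir_size_mb(ir_model_xml, ir_model_bin):.2f} MB")
print(f"INT8 IR: {ir_size_mb(compressed_model_xml, compressed_model_bin):.2f} MB")
```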
github_jupyter
# Using PLIO to analyze control networks PLIO is a general purpose library for reading data from various sources. In this workshop, we will be using PLIO's ability to read ISIS control networks into a Pandas dataframe. ``` # PLIO uses pysis for some other things. We don't technically need this but it avoids a warning. import os os.environ['ISISROOT'] = '/usgs/cpkgs/anaconda3_linux/envs/isis4.3.0' os.environ['ISISDATA'] = '/usgs/cpkgs/isis3/isis_data' # 3D plotting toolkit for matplotlib from mpl_toolkits.mplot3d import Axes3D # Numerical Python library import numpy as np ``` # Our networks All of this data was generously provided by Lynn Weller and Mike Bland from their Europa control project. The first network is a very rough starting point. The Galileo images of Europa were put through the [findfeatures](https://isis.astrogeology.usgs.gov/Application/presentation/Tabbed/findfeatures/findfeatures.html) application and then all of the resulting networks were merged together. This network has many known issues including islands, massive residuals, and poor coverage. The second network is the final network containing Galileo and Voyager images of Europa. The issues from the initial network have been resolved and the final point cloud covers the majority of the body. ``` galileo_net = '/scratch/jmapel/europa/networks/GLL_FFCombined_thin_SubReg2_Del_2.net' final_net = '/scratch/jmapel/europa/networks/GalileoVoyager_Europa_Merged_2020_CilixFree.net' ``` # The control network dataframe PLIO directly ingests the data from the control network file. Each row in the dataframe is a single control measure and each column is a field from the protobuf control network. The data for control points is stored implicitly in its measures. ``` # This function is what reads a control network file from plio.io.io_controlnetwork import from_isis galileo_df = from_isis(galileo_net) galileo_df.describe() ``` ### Exercise: How many measures are there in the network? How many points are there in the network? How many images are there in the network? tip: use len(dataframe) to find the number of rows in a dataframe tip: use dataframe["columnName"].nunique() to find the number of unique values in a column ## Data types The different columns of our dataframe store different types of data. The cell below shows all of the the data types in the dataframe. You can see all of the different possible datatypes for a dataframe in the [pandas docs](https://pandas.pydata.org/pandas-docs/stable/user_guide/basics.html#basics-dtypes). ``` galileo_df.dtypes ``` Most of the data types are straightforward. For example, the line and sample are 64-bit floats. Let's dig into the more unusual types. **pointType, measureType, aprioriSurfPointSource, and aprioriRadiusSource** are 64 bit integers, but those integers correspond to enumerations. For example, a pointType of 2 means Free. 
See the tables below for all of the enumerations ``` galileo_df[['pointType', 'measureType', 'aprioriSurfPointSource']].head() ``` <center>**pointType**</center> | Value | Name | | ----: | :---------------- | | 0 | Tie (obsolete) | | 1 | Ground (obsolete) | | 2 | Free | | 3 | Constrained | | 4 | Fixed | <center>**measureType**</center> | Value | Name | | ----: | :----------------- | | 0 | Candidate | | 1 | Manual | | 2 | RegisteredPixel | | 3 | RegisteredSubPixel | <center>**aprioriSurfPointSource & aprioriRadiusSource **</center> | Value | Name | | ----: | :---------------- | | 0 | None | | 1 | User | | 2 | AverageOfMeasures | | 3 | Reference | | 4 | Ellipsoid | | 5 | DEM | | 6 | Basemap | | 7 | BundleSolution | ### Exercise: Have any measure in this network been sub-pixel registered? tip: look at the measure types **id, pointChoosername, pointDatetime, aprioriSurfPointSourceFile, aprioriRadiusSourceFile, serialnumber, measureChoosername, and measureDatetime** are all listed as objects but are simply strings. ``` galileo_df[['id', 'serialnumber', 'pointChoosername', 'pointDatetime', 'measureChoosername', 'measureDatetime']].head() ``` **adjustedCovar, pointLog, and measureLog** are more complicated. We will go over adjustedCovar later with the final Euroap network. pointLog is leftover from older network formats and can be ignored. measureLog contains information about the registration of the measure. ``` galileo_df.loc[1,'measureLog'] ``` ## Data availability Depending on how your network was generated and what processing has been done, many fields will not be set. If a numerical field has a value of 0, then it has not been set. For example, our network has not been bundle adjusted, so there are only a priori ground points. ``` galileo_df[['aprioriX', 'aprioriY', 'aprioriZ', 'adjustedX', 'adjustedY', 'adjustedZ']].describe() ``` ### Exercise: Can you find all of the fields that are completely unset in our control network? tip: numerical fields default to 0, strings default to an empty string "", and boolean values default to False. You can also check which columns are default programmaticaly. The following cell checks if all of the values in a column are a default value. ``` (galileo_df==0).all() | (galileo_df=="").all() | (galileo_df==False).all() ``` # Looking at a bundle adjusted control network Our Galileo network is interesting but networks have significantly more useful information in them after bundle adjustment. So, let's take a look at the final Europa network. ``` final_net_df = from_isis(final_net) final_net_df.describe() ``` ### Exercise: What fields are set in the bundle adjusted network that weren't previously? ## Analyzing the measures The data in a control network dataframe is not always in the format we want to work with. The measure residuals are broken down into the line and sample residuals. The following cell computes the full magnitude of the residuals and adds it to the dataframe under the "residualMag" column. ``` final_net_df['residualMag'] = np.sqrt(final_net_df['sampleResidual']**2 + final_net_df['lineResidual']**2) ``` Now let's plot the residuals and see if we can form any theories. The next cell imports matplotlib for plotting tools and then plots the residuals in terms of sample and line residual. Note that the color of points is based on the residual magnitude, whcih should give a nice bullseye effect. ``` # This allows us to interact with our plots. 
This must be set before importing pyplot %matplotlib notebook # General plotting library import matplotlib import matplotlib.pyplot as plt resid_fig = plt.figure(figsize=(6, 6)) resid_ax = resid_fig.add_subplot(111) resid_scatter = resid_ax.scatter(final_net_df['sampleResidual'], final_net_df['lineResidual'], c=final_net_df['residualMag'], marker='+') resid_ax.set_aspect('equal') plt.axhline(0, color='black') plt.axvline(0, color='black') resid_cbar = plt.colorbar(resid_scatter) resid_fig.suptitle('Bundle Adjusted Measure Residuals') resid_ax.set_xlabel('Sample Residual') resid_ax.set_ylabel('Line Residual') resid_cbar.set_label('Residual Magnitude') plt.show() ``` We can also color our points based on other properties. Let's try and separate the measures out by mission. The serial numbers should help us so let's look at the serial numbers for all of our images. ``` final_net_df['serialnumber'].unique() ``` Each serial number starts with the mission name, which makes separating them out easy. All we need to do is check if the beginning of the serial number matches our mission. The pd.DataFrame.str package allows us to do this type of string comparisons quickly and easily. Here we will use the DataFrame.str.startswith method. ``` final_galileo_df = final_net_df[final_net_df['serialnumber'].str.startswith('Galileo')] final_voyager1_df = final_net_df[final_net_df['serialnumber'].str.startswith('Voyager1')] final_voyager2_df = final_net_df[final_net_df['serialnumber'].str.startswith('Voyager2')] ``` Now let's plot the measures and color them based on their mission. ``` inst_resid_fig = plt.figure(figsize=(6, 6)) inst_resid_ax = inst_resid_fig.add_subplot(111) inst_resid_ax.scatter(final_galileo_df['sampleResidual'], final_galileo_df['lineResidual'], color='Green', marker='+', alpha=0.25, label='Galileo') inst_resid_ax.scatter(final_voyager1_df['sampleResidual'], final_voyager1_df['lineResidual'], color='Red', marker='+', alpha=0.25, label='Voyager1') inst_resid_ax.scatter(final_voyager2_df['sampleResidual'], final_voyager2_df['lineResidual'], color='Blue', marker='+', alpha=0.25, label='Voyager2') inst_resid_ax.set_aspect('equal') plt.axhline(0, color='black') plt.axvline(0, color='black') plt.legend() inst_resid_fig.suptitle('Bundle Adjusted Measure Residuals by Mission') inst_resid_ax.set_xlabel('Sample Residual') inst_resid_ax.set_ylabel('Line Residual') plt.show() ``` ### What can you say about the residuals for the different missions based on our plot? ### Exercise: What the descriptive statistics for the residual magnitude of the Galileo measures? What about for Voyager 1 and Voyager 2? ``` final_galileo_df['residualMag'].describe() final_voyager1_df['residualMag'].describe() final_voyager2_df['residualMag'].describe() ``` ### Do you notice anything interesting about the residual magnitudes for the different instruments? How does this compare to what you noticed with the scatter plot? We can even test if the measure residuals are normally distributed. The following cell performs a chi-squared test to see if the residual magnitudes could reasonably come from a normal distribution. This is important because it will tell us if we have large blunders in our network or systematic error from something like a bad sensor model. 
``` # Statistics library from scipy import stats alpha = 1e-3 # 99.999% confidence _, normal_test_result = stats.normaltest(final_voyager1_df['residualMag']) print(f'Chi-squared test statistic: {normal_test_result}') if (normal_test_result < alpha): print("The residuals are normally distributed") else: print("The residuals may not be normally distributed") ``` ## Analyzing the points The information for control points is duplicated for each measure they have. So, the first step in looking at control point data is to extract only the data we want from the dataframe. This will make the dataframe easier to read and it will make things run quicker. To do this, we're going to first extract all of the columns with point data. Then, we're going extract the first measure from each point. After all is said and done, we will have a dataframe with columns related to the point info and only one row for each point. ``` point_columns = ['id', 'pointType', 'pointChoosername', 'pointDatetime', 'pointEditLock', 'pointIgnore', 'pointJigsawRejected', 'aprioriSurfPointSource', 'aprioriSurfPointSourceFile', 'aprioriRadiusSource', 'aprioriRadiusSourceFile', 'latitudeConstrained', 'longitudeConstrained', 'radiusConstrained', 'aprioriX', 'aprioriY', 'aprioriZ', 'aprioriCovar', 'adjustedX', 'adjustedY', 'adjustedZ', 'adjustedCovar', 'pointLog'] final_points_df = final_net_df[point_columns].drop_duplicates('id') final_points_df.describe() ``` Next, we're going to transform the point data so that it's more useful to us. This cell will take the (X, Y, Z) adjusted ground points and convert them to (lat, lon, radius) using a library called pyproj. pyproj is a very powerful projections library and can do many cartofraphic transformations and projections. **Note: This cell will generate a warning because we are using old pyproj.Proj calls which will eventually need to change. For now we can ignore the warning.** ``` # Projection library for switching between rectangular and latitudinal os.environ['PROJ_LIB'] = '/usgs/cpkgs/anaconda3_linux/envs/autocnet/share/proj' import pyproj # Compute the lat/lon/alt europa_radii = [1562600, 1560300, 1559500] ecef = pyproj.Proj(proj='geocent', a=europa_radii[0], b=europa_radii[1], c=europa_radii[2]) lla = pyproj.Proj(proj='latlong', a=europa_radii[0], b=europa_radii[1], c=europa_radii[2]) lon, lat, alt = pyproj.transform(ecef, lla, final_points_df['adjustedX'].values, final_points_df['adjustedY'].values, final_points_df['adjustedZ'].values, radians=True) # Store the data in the dataframe final_points_df['latitude'] = lat final_points_df['longitude'] = lon final_points_df['altitude'] = alt # We will also want the point radii final_points_df['radius'] = np.sqrt(final_points_df['adjustedX']**2 + final_points_df['adjustedY']**2 + final_points_df['adjustedZ']**2) ``` Because of how we defined our projection, the latitude and longitude values will be in radians. Also, the longitude will be in 180 postiive East. You can change this by modifying how you use pyproj but that is outside of this workshop. ``` final_points_df[["latitude", "longitude", "altitude", "radius"]].describe() ``` ### Exercise: Convert the latitude and longitude from radians to degrees: Similar to how we computed the residual magnitude, we want to compute the average residual magnitude for each point. The following cell goes back to our original dataframe, computes the mean point by point, and then saves all of the results in our new dataframe. 
**Note: This cell can take a while to run because it has to re-access the dataframe for every point** ``` final_points_df["averageResidual"] = 0 for point_id, group in final_net_df.groupby('id'): final_points_df.loc[final_points_df.id == point_id, "averageResidual"] = group['residualMag'].mean() ``` ### Exercise: What is the 95th percentile for the average residuals? ## Plotting the points Now that we have latitudes and longitudes for each point, we can generate some simple plots to look at them. ``` point_map = plt.figure(figsize=(10, 10)) point_ax = point_map.add_subplot(111) point_ax.scatter(final_points_df["longitude"], final_points_df["latitude"], marker='+') point_map.suptitle('Control Points') point_ax.set_xlabel('Longitude') point_ax.set_ylabel('Latitude') plt.show() ``` It can also be helpful to color the points based on different values. The following cell draws the same plot but colors each point based on its average residual. Because the residuals are not uniformly distributed we also apply a lograithmic scale to the colors that you can see in the colorbar. ``` point_resid_map = plt.figure(figsize=(10, 10)) point_resid_ax = point_resid_map.add_subplot(111) point_resid_norm = matplotlib.colors.LogNorm(vmax=final_points_df["averageResidual"].max()) point_resid_scatter = point_resid_ax.scatter(final_points_df["longitude"], final_points_df["latitude"], c=final_points_df["averageResidual"], alpha=0.5, norm=point_resid_norm, marker='+', cmap=plt.get_cmap('plasma')) point_resid_cbar = plt.colorbar(point_resid_scatter) point_resid_map.suptitle('Control Points') point_resid_ax.set_xlabel('Longitude') point_resid_ax.set_ylabel('Latitude') point_resid_cbar.set_label('Average Residual Magnitude') plt.show() ``` Plotting individual points can be helpful getting a general idea for the distribution of the points, but it can be hard to interpret the data in area where there are many points all ontop of each other. So, let's combine near by points and determine the residual based on the region. To do this, we're going to bin the points into a regular grid across the latitude and longitude and then compute the mean within each bin. 
**Try changing the grid_step value and re-running the two cells** ``` grid_step = 10 final_points_df['lonBin'] = final_points_df['longitude'].apply(lambda x: [e for e in range(-180, 180, grid_step) if e <= x][-1]) final_points_df['latBin'] = final_points_df['latitude'].apply(lambda x: [e for e in range(-90, 90, grid_step) if e <= x][-1]) avg_resid_binned = final_points_df.groupby(['lonBin', 'latBin'])['averageResidual'].mean() filled_data = [] for lon_bin in range(-180, 180, grid_step): for lat_bin in range(-90, 90, grid_step): try: filled_data.append(avg_resid_binned.loc[lon_bin, lat_bin]) except: filled_data.append(0) filled_data = np.array(filled_data).reshape((int(360/grid_step), int(180/grid_step))).T avg_gridded = plt.figure(figsize=(10, 5)) avg_gridded_ax = avg_gridded.add_subplot(111) avg_gridded_plot = avg_gridded_ax.imshow(filled_data, origin='lower', extent= [-180, 180, -90, 90], cmap=plt.get_cmap('plasma')) avg_gridded_ax.scatter(final_points_df["longitude"], final_points_df["latitude"], color='black', marker='+', alpha=0.1) avg_gridded_cbar = plt.colorbar(avg_gridded_plot) avg_gridded.suptitle('Average Residual by lat/lon grid') avg_gridded_ax.set_xlabel('Longitude') avg_gridded_ax.set_ylabel('Latitude') avg_gridded_cbar.set_label('Average Residual Magnitude') plt.show() ``` ## 3D Plotting 2D plotting either requires these simple equal area projections or converting to another projection via pyproj. Instead, let's look at our data in true 3D. The following cell plots the same data as before but plots it in 3d instead of just a 2d projection ``` resid_fig_3d = plt.figure(figsize=(10, 10)) resid_ax_3d = resid_fig_3d.add_subplot(111, projection='3d') resid_plot_3d = resid_ax_3d.scatter(final_points_df['adjustedX'], final_points_df['adjustedY'], final_points_df['adjustedZ'], c=final_points_df["averageResidual"], alpha=0.5, norm=point_resid_norm, marker='+', cmap=plt.get_cmap('plasma')) resid_cbar_3d = plt.colorbar(resid_plot_3d) resid_fig_3d.suptitle('3D Control Points') resid_cbar_3d.set_label('Average Residual Magnitude (pix)') plt.show() ```
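As a worked answer to two of the exercises above, the sketch below converts the point coordinates to degrees and reports the 95th percentile of the average residuals. It assumes `final_points_df` still holds the radian `latitude`/`longitude` and the `averageResidual` column computed earlier; note the longitude remains positive East in the -180 to 180 range.

```
import numpy as np

# Convert latitude and longitude from radians to degrees
final_points_df['latitudeDeg'] = np.degrees(final_points_df['latitude'])
final_points_df['longitudeDeg'] = np.degrees(final_points_df['longitude'])

# 95th percentile of the per-point average residual magnitude
print(final_points_df['averageResidual'].quantile(0.95))
```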
github_jupyter
``` %matplotlib inline import pandas as pd import matplotlib.pyplot as plt import datetime import pytz columns = ['Capture_time', 'Id'] data = pd.read_csv('evo_data_menor.csv', usecols=columns, nrows=500000) data.head() print(datetime.datetime.now()) # Colleting vehicle ids car_ids = list(data.Id.unique()) print(datetime.datetime.now()) # Removing uncommon ids # Ex: 4c5865a3-4b03-40f6-a3a8-d4e94aae3b17 car_ids = [id for id in car_ids if id.find('-') == -1] def str_to_datetime(df_time): """ Reformatando de string para datetime. Parameters ---------- df_time : pandas.DataFrame, string Dataframe com strings a serem convertidas para datetime. Returns ---------- date_list : pandas.DataFrame, datetime Dataframe com valores em datetime para possíveis fusos de Vancouver. """ date_list = [] # Formatos de fuso horário comum de Vancouver e # fuso horário característico de horário de verão format_string = ['%Y-%m-%d %H:%M:%S.%f-08:00', '%Y-%m-%d %H:%M:%S.%f-07:00', '%Y-%m-%d %H:%M:%S-08:00', '%Y-%m-%d %H:%M:%S-07:00'] for date in df_time: for fmt in format_string: try: date_list.append(datetime.datetime.strptime(str(date), fmt)) break except: pass return pd.DataFrame(date_list) data['Capture_time'] = str_to_datetime(data['Capture_time']) data.head() parked = 0 andando_weekdays = [] andando_weekends = [] data = data.sort_index(by='Capture_time') data.index = range(len(data)) print(datetime.datetime.now()) # Percorre todo o dataframe para verificar quantos carros estão andando em dado minuto for i in range(1, len(data)): start_time_atual = int(data['Capture_time'].iloc[i].timestamp()) start_time_anterior = int(data['Capture_time'].iloc[i-1].timestamp()) # Enquanto está no mesmo minuto, é analisado se o carro está parado if (start_time_atual == start_time_anterior): parked += 1 else: # Carros viajando são dados pelo total de carros da frota menos os que estão atualmente estacionados in_travel = len(car_ids) - parked porcentagem = (in_travel/len(car_ids))*100 # Verifica que a data está entre segunda(1) e sexta(5) if (int(datetime.datetime.fromtimestamp(start_time_anterior).strftime('%w')) > 0 and int(datetime.datetime.fromtimestamp(start_time_anterior).strftime('%w')) < 6): andando_weekdays.append([start_time_anterior, in_travel, porcentagem]) else: andando_weekends.append([start_time_anterior, in_travel, porcentagem]) parked = 0 print(datetime.datetime.now()) dfIn_Travel_weekdays = pd.DataFrame(andando_weekdays, columns=['capture_time', 'total_in_travel', 'percentage']) dfIn_Travel_weekends = pd.DataFrame(andando_weekends, columns=['capture_time', 'total_in_travel', 'percentage']) dfIn_Travel_weekdays.head() def from_timestamp_list(timestamp_list): datetime_list = [] for date in timestamp_list: datetime_list.append(datetime.datetime.fromtimestamp(int(date))) return pd.DataFrame(datetime_list) # Formatando os dados de unix timestamp para datetime dfWeekdays = dfIn_Travel_weekdays dfWeekdays['capture_time'] = from_timestamp_list(dfWeekdays['capture_time']) dfWeekends = dfIn_Travel_weekends dfWeekends['capture_time'] = from_timestamp_list(dfWeekends['capture_time']) # Plot da porcentagem de carros alocados em dias de semana plt.plot(dfWeekdays['capture_time'],dfWeekdays['percentage']) plt.gcf().autofmt_xdate() plt.show() # Plot da porcentagem de carros alocados em dias de final de semana plt.plot(dfWeekends['capture_time'],dfWeekends['percentage']) plt.gcf().autofmt_xdate() plt.show() dfWeekends.to_csv('weekends.csv', index=False, encoding='utf-8') dfWeekdays.to_csv('weekdays.csv', index=False, 
encoding='utf-8') dfWeekdays = pd.read_csv('plots/weekdays.csv') dfWeekends = pd.read_csv('plots/weekends.csv') # Debug dfWeekdays.capture_time = pd.to_datetime(dfWeekdays.capture_time) dfWeekdays['minute'] = dfWeekdays.capture_time.dt.minute dfWeekdays['hour'] = dfWeekdays.capture_time.dt.hour # Outlier importante de ser verificado dfWeekdays[(dfWeekdays.hour == 10) & (dfWeekdays.minute == 32)] dfWeekdays['capture_time'] = pd.to_datetime(dfWeekdays['capture_time']) dfWeekends['capture_time'] = pd.to_datetime(dfWeekends['capture_time']) def media(df): """ Faz a media das porcentagens minuto a minuto de todo o dataset. Parameters ------------ df : Pandas dataframe Dados a serem analisados, com uma coluna dos horários e outra com as porcentagens. Returns ---------- media : Pandas dataframe Dados com a média das porcentagens para 24 horas. """ minute = [] # Criando um vetor que irá sinalizar a quantidade de minutos corridos até tal registro for i in range(len(df)): capture_time = df['capture_time'].iloc[i] minute.append(capture_time.minute + (capture_time.hour * 60)) # Ordenando o dataset por minutos corridos para facilitar a soma de valores df['minute'] = minute df = df.sort_values(by=['minute', 'capture_time']) valores = pd.DataFrame() media = [] for i in range(1,len(df)): minute_atual = df['minute'].iloc[i-1] minute_proximo = df['minute'].iloc[i] # Enquanto está no mesmo valor de minutos corridos os valores percentuais # são armazenados para ser calculada a média de tal minuto no intervalo de 24h if (minute_proximo == minute_atual): valores = valores.append([df['percentage'].iloc[i-1]]) else: valores = valores.append([df['percentage'].iloc[i-1]]) media.append([df['capture_time'].iloc[i-1].strftime('%H:%M'), float(valores.mean()), float(valores.std())]) valores = pd.DataFrame() media = pd.DataFrame(media, columns=['time', 'mean', 'std']) return media # Fazendo a média das porcentagens de cada dia dfWeekdays = dfWeekdays.sort_values(by='capture_time') mediaWeekdays = media(dfWeekdays) dfWeekends = dfWeekends.sort_values(by='capture_time') mediaWeekends = media(dfWeekends) mediaWeekdays.to_csv('mediaWeekdays.csv', index=False, encoding='utf-8') mediaWeekends.to_csv('mediaWeekends.csv', index=False, encoding='utf-8') mediaWeekdays = pd.read_csv('plots/mediaWeekdays.csv') mediaWeekends = pd.read_csv('plots/mediaWeekends.csv') import numpy as np # Plot da media das porcentagens dos dias de semana fig, ax = plt.subplots() # Curva dos carros andando ax.plot(range(len(mediaWeekdays['time'])),mediaWeekdays['mean'], label='Carros Ocupados') # Curvas representando o intervalo de desvio padrão ax.plot(range(len(mediaWeekdays['time'])), mediaWeekdays['mean']+mediaWeekdays['std'], alpha=150, c='gray', label='Desvio Padrão') ax.plot(range(len(mediaWeekdays['time'])), mediaWeekdays['mean']-mediaWeekdays['std'], alpha=150, c='gray') # Modificando os labels das horas ax.xaxis.set_ticks(np.arange(0, 1441, 120)) fig.canvas.draw() labels = [item.get_text() for item in ax.get_xticklabels()] labels = range(0,26,2) ax.set_xticklabels(labels) # Legendas e label dos eixos plt.legend(bbox_to_anchor=(0.01, 0.99), loc=2, borderaxespad=0.2) plt.ylabel('Percentual') plt.xlabel('Horário') # Salvando o plot # plt.savefig('Weekdays_v2.pdf', bbox_inches='tight') plt.show() import numpy as np # Plot da media das porcentagens dos dias de semana fig, ax = plt.subplots() # Curva dos carros andando ax.plot(range(len(mediaWeekends['time'])),mediaWeekends['mean'], label='Carros Reservados') # Curvas representando o intervalo de 
desvio padrão ax.plot(range(len(mediaWeekends['time'])), mediaWeekends['mean']+mediaWeekends['std'], alpha=150, c='gray', label='Desvio Padrão') ax.plot(range(len(mediaWeekends['time'])), mediaWeekends['mean']-mediaWeekends['std'], alpha=150, c='gray') # Modificando os labels das horas ax.xaxis.set_ticks(np.arange(0, 1441, 120)) fig.canvas.draw() labels = [item.get_text() for item in ax.get_xticklabels()] labels = range(0,26,2) ax.set_xticklabels(labels) # Legendas e label dos eixos plt.legend(bbox_to_anchor=(0.01, 0.99), loc=2, borderaxespad=0.2) plt.ylabel('Percentual') plt.xlabel('Horário') # Salvando o plot # plt.savefig('Weekends_v2.pdf', bbox_inches='tight') plt.show() # CSV criado a partir dos dados coletados do arquivo ModoApi_Data_Filter dfTravels = pd.read_csv('travels.csv') dfTravels['Start_time'] = str_to_datetime(dfTravels['Start_time']) dfTravels['End_time'] = str_to_datetime(dfTravels['End_time']) # A função deve receber os valores previamente separados como somente dias de semana ou finais de semana def cont_reservas(dfDays): # Coletando todos os minutos de captura datas = pd.to_datetime(dfDays['capture_time']) datas = pd.DataFrame(datas) dfReservas = pd.concat([dfTravels['Start_time'], dfTravels['End_time']], axis=1) dfReservas = dfReservas.sort_values(by='Start_time') # Outlier que está gerando comparações erroneas dfReservas.drop(65240, axis=0, inplace=True) cont_reservas = 0 reservas = [] # Auxiliar para adquirir o indice da viagem mais proxima que engloba a hora atual proximo_start = 0 for i in range(len(datas)): data = datas['capture_time'].iloc[i] # Auxiliar para evitar analises desnecessárias # start_test = True # Comparando todas as datas aos intervalos das reservas, e vendo se ele faz parte para somar a porcentagem for j in range(proximo_start, len(dfReservas)): if (j == 289): continue if (dfReservas['Start_time'].iloc[j] <= data <= dfReservas['End_time'].iloc[j]): cont_reservas += 1 # Evita comparações desnecessárias com viagens que terminaram antes da hora a ser analisada # Seguindo a ideia de que se a viagem não englobou antes a hora atual # ela não irá englobar as próximas # if (start_test) : # if (proximo_start > 0): proximo_start = j - 1 # else: proximo_start = j # start_test = False # Evita analisar viagens que começaram depois da hora atual if (dfReservas['Start_time'].iloc[j] > data): break porcentagem = (cont_reservas/len(car_ids))*100 reservas.append([data, cont_reservas, porcentagem]) cont_reservas = 0 if (i % 100 == 0): print(str(i) + " "+str(proximo_start)) reservas = pd.DataFrame(reservas, columns=['datetime', 'total_reserves', 'percentage']) return reservas dfR_Weekdays = cont_reservas(dfWeekdays) dfR_Weekends = cont_reservas(dfWeekends) dfR_Weekdays.to_csv('r_weekdays.csv', index=False, encoding='utf-8') dfR_Weekends.to_csv('r_weekends.csv', index=False, encoding='utf-8') dfR_Weekends = pd.read_csv('plots/r_weekends.csv') dfR_Weekends['datetime']= pd.to_datetime(dfR_Weekends['datetime']) dfR_Weekdays = pd.read_csv('plots/r_weekdays.csv') dfR_Weekdays['datetime'] = pd.to_datetime(dfR_Weekdays['datetime']) # Plot da porcentagem de carros alocados em fins de semana plt.plot(dfR_Weekends['datetime'],dfR_Weekends['percentage']) plt.gcf().autofmt_xdate() plt.show() # Plot da porcentagem de carros alocados em dias de semana plt.plot(dfR_Weekdays['datetime'],dfR_Weekdays['percentage']) plt.gcf().autofmt_xdate() plt.show() # Fazendo a média das porcentagens de cada dia dfR_Weekdays = dfR_Weekdays.sort_values(by='datetime') dfR_Weekdays['capture_time'] = 
dfR_Weekdays['datetime'] dfmediaR_Weekdays = media(dfR_Weekdays, 32) dfR_Weekends = dfR_Weekends.sort_values(by='datetime') dfR_Weekends['capture_time'] = dfR_Weekends['datetime'] dfmediaR_Weekends = media(dfR_Weekends, 20) dfmediaR_Weekdays = pd.read_csv('plots/media_r_weekdays.csv', encoding='utf-8') dfmediaR_Weekends = pd.read_csv('plots/media_r_weekends.csv', encoding='utf-8') import matplotlib import numpy as np matplotlib.rc('font', size=12) # Plot das porcentagens dos fins de semana fig, (ax1, ax2) = plt.subplots(1, 2) fig.set_size_inches(14,4.5) # Curva dos carros andando ax1.plot(range(len(mediaWeekdays['time'])),mediaWeekdays['mean'], label='Carros Ocupados') # Curvas representando o intervalo de desvio padrão ax1.plot(range(len(mediaWeekdays['time'])), mediaWeekdays['mean']+mediaWeekdays['std'], alpha=150, c='gray') ax1.plot(range(len(mediaWeekdays['time'])), mediaWeekdays['mean']-mediaWeekdays['std'], alpha=150, c='gray') # Curva dos carros reservados ax1.plot(range(len(dfmediaR_Weekdays['time'])),dfmediaR_Weekdays['mean'], label='Carros Reservados', c='r', ls='--') # Curvas representando o intervalo de desvio padrão ax1.plot(range(len(dfmediaR_Weekdays['time'])), dfmediaR_Weekdays['mean']+dfmediaR_Weekdays['std'], alpha=150, c='#FA8072', ls='--') ax1.plot(range(len(dfmediaR_Weekdays['time'])), dfmediaR_Weekdays['mean']-dfmediaR_Weekdays['std'], alpha=150, c='#FA8072', ls='--') # Modificando os labels das horas e das porcentagens ax1.xaxis.set_ticks(np.arange(0, 1441, 120)) ax1.yaxis.set_ticks(np.arange(0, 110, 10)) fig.canvas.draw() labels = [item.get_text() for item in ax1.get_xticklabels()] labels = range(0,26,2) ax1.set_xticklabels(labels) # Eixo y de 0 a 100% ax1.set_ylim([0,100]) # Legendas e label dos eixos ax1.legend(bbox_to_anchor=(0.01, 0.99), loc=2, borderaxespad=0.2) ax1.set_ylabel('Percentual') ax1.set_xlabel('Horário') # # Curva dos carros andando ax2.plot(range(len(mediaWeekends['time'])),mediaWeekends['mean'], label='Carros Ocupados') # # Curvas representando o intervalo de desvio padrão ax2.plot(range(len(mediaWeekends['time'])), mediaWeekends['mean']+mediaWeekends['std'], alpha=150, c='gray') ax2.plot(range(len(mediaWeekends['time'])), mediaWeekends['mean']-mediaWeekends['std'], alpha=150, c='gray') # # Curva dos carros reservados ax2.plot(range(len(dfmediaR_Weekends['time'])),dfmediaR_Weekends['mean'], label='Carros Reservados', c='r', ls='--') # # Curvas representando o intervalo de desvio padrão ax2.plot(range(len(dfmediaR_Weekends['time'])), dfmediaR_Weekends['mean']+dfmediaR_Weekends['std'], alpha=150, c='#FA8072', ls='--') ax2.plot(range(len(dfmediaR_Weekends['time'])), dfmediaR_Weekends['mean']-dfmediaR_Weekends['std'], alpha=150, c='#FA8072', ls='--') # Modificando os labels das horas e das porcentagens ax2.xaxis.set_ticks(np.arange(0, 1441, 120)) ax2.yaxis.set_ticks(np.arange(0, 110, 10)) fig.canvas.draw() labels = [item.get_text() for item in ax2.get_xticklabels()] labels = range(0,26,2) ax2.set_xticklabels(labels) # Eixo y de 0 a 100% ax2.set_ylim([0,100]) # Legendas e label dos eixos ax2.legend(bbox_to_anchor=(0.55, 0.99), loc=2, borderaxespad=0.1) ax2.set_ylabel('Percentual') ax2.set_xlabel('Horário') plt.show() #plt.savefig('plots/ViagensPorHoras_Evo.pdf') ```
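The per-minute occupancy above is computed with a Python-level loop over every row, which is slow on the full dataset. The cell below is a vectorized sketch of the same idea, not part of the original notebook: for every capture timestamp it treats the number of rows as the number of parked cars and the rest of the fleet as travelling. It assumes `data.Capture_time` already holds datetimes and `car_ids` is the fleet list built earlier; it ignores the edge cases the loop handles around the last partial minute.

```
# Vectorized approximation of the row-by-row occupancy loop.
fleet_size = len(car_ids)

per_minute = data.groupby('Capture_time').size().rename('parked').reset_index()
per_minute['total_in_travel'] = fleet_size - per_minute['parked']
per_minute['percentage'] = per_minute['total_in_travel'] / fleet_size * 100

# Monday(0) through Friday(4) are weekdays, mirroring the strftime('%w') check above
weekday_mask = per_minute['Capture_time'].dt.dayofweek < 5
dfWeekdays_vec = per_minute[weekday_mask].rename(columns={'Capture_time': 'capture_time'})
dfWeekends_vec = per_minute[~weekday_mask].rename(columns={'Capture_time': 'capture_time'})
```

The result has the same columns as `dfIn_Travel_weekdays`/`dfIn_Travel_weekends`, so the downstream averaging and plotting cells can be reused unchanged.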
github_jupyter
# Activation Functions

This recipe introduces the activation functions available in TensorFlow. We start by loading the necessary libraries for this script.

```
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
# from tensorflow.python.framework import ops
# ops.reset_default_graph()
tf.reset_default_graph()
```

### Start a graph session

```
sess = tf.Session()
```

### Initialize the X range values for plotting

```
x_vals = np.linspace(start=-10., stop=10., num=100)
```

### Activation Functions

ReLU activation

```
print(sess.run(tf.nn.relu([-3., 3., 10.])))
y_relu = sess.run(tf.nn.relu(x_vals))
```

ReLU-6 activation

```
print(sess.run(tf.nn.relu6([-3., 3., 10.])))
y_relu6 = sess.run(tf.nn.relu6(x_vals))
```

ReLU-6 refers to the following function

\begin{equation}
\min\left(\max(0, x), 6\right)
\end{equation}

Sigmoid activation

```
print(sess.run(tf.nn.sigmoid([-1., 0., 1.])))
y_sigmoid = sess.run(tf.nn.sigmoid(x_vals))
```

Hyperbolic tangent activation

```
print(sess.run(tf.nn.tanh([-1., 0., 1.])))
y_tanh = sess.run(tf.nn.tanh(x_vals))
```

Softsign activation

```
print(sess.run(tf.nn.softsign([-1., 0., 1.])))
y_softsign = sess.run(tf.nn.softsign(x_vals))
```

Softsign refers to the following function

\begin{equation}
\frac{x}{1 + |x|}
\end{equation}

<br>
<img src="http://tecmemo.wpblog.jp/wp-content/uploads/2017/01/activation_04.png" width=40%>

Softplus activation

```
print(sess.run(tf.nn.softplus([-1., 0., 1.])))
y_softplus = sess.run(tf.nn.softplus(x_vals))
```

Softplus refers to the following function

\begin{equation}
\log\left(\exp(x) + 1\right)
\end{equation}

Exponential linear unit (ELU) activation

```
print(sess.run(tf.nn.elu([-1., 0., 1.])))
y_elu = sess.run(tf.nn.elu(x_vals))
```

ELU refers to the following function

\begin{equation}
f(x) =
\begin{cases}
\exp(x) - 1 & (x < 0)\\
x & (x \geq 0)\\
\end{cases}
\end{equation}

### Plot the different functions

```
plt.style.use('ggplot')

plt.plot(x_vals, y_softplus, 'r--', label='Softplus', linewidth=2)
plt.plot(x_vals, y_relu, 'b:', label='ReLU', linewidth=2)
plt.plot(x_vals, y_relu6, 'g-.', label='ReLU6', linewidth=2)
plt.plot(x_vals, y_elu, 'k-', label='ExpLU', linewidth=0.5)
plt.ylim([-1.5, 7])
plt.legend(loc='upper left', shadow=True, edgecolor='k')
plt.show()

plt.plot(x_vals, y_sigmoid, 'r--', label='Sigmoid', linewidth=2)
plt.plot(x_vals, y_tanh, 'b:', label='Tanh', linewidth=2)
plt.plot(x_vals, y_softsign, 'g-.', label='Softsign', linewidth=2)
plt.ylim([-1.3, 1.3])
plt.legend(loc='upper left', shadow=True, edgecolor='k')
plt.show()
```

![Activation_Functions1](https://github.com/nfmcclure/tensorflow_cookbook/raw/jupyter_notebooks/01_Introduction/images/06_activation_funs1.png)
![Activation_Functions2](https://github.com/nfmcclure/tensorflow_cookbook/raw/jupyter_notebooks/01_Introduction/images/06_activation_funs2.png)
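If you want to double-check the closed-form definitions quoted above without a TensorFlow session, a quick NumPy sketch (not part of the original recipe) reproduces them directly:

```
import numpy as np

x = np.array([-3.0, -1.0, 0.0, 1.0, 3.0, 10.0])

relu6_np    = np.minimum(np.maximum(0.0, x), 6.0)   # min(max(0, x), 6)
softsign_np = x / (1.0 + np.abs(x))                 # x / (1 + |x|)
softplus_np = np.log(np.exp(x) + 1.0)               # log(exp(x) + 1)
elu_np      = np.where(x < 0, np.exp(x) - 1.0, x)   # exp(x) - 1 for x < 0, x otherwise

print(relu6_np)
print(softsign_np)
print(softplus_np)
print(elu_np)
```

The values should match the `tf.nn` outputs printed in the cells above, up to floating-point precision.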
github_jupyter
<!--BOOK_INFORMATION--> <img align="left" style="padding-right:10px;" src="figures/PDSH-cover-small.png"> *This notebook contains an excerpt from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).* *The text is released under the [CC-BY-NC-ND license](https://creativecommons.org/licenses/by-nc-nd/3.0/us/legalcode), and code is released under the [MIT license](https://opensource.org/licenses/MIT). If you find this content useful, please consider supporting the work by [buying the book](http://shop.oreilly.com/product/0636920034919.do)!* <!--NAVIGATION--> < [Visualization with Seaborn](04.14-Visualization-With-Seaborn.ipynb) | [Contents](Index.ipynb) | [Machine Learning](05.00-Machine-Learning.ipynb) > <a href="https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/04.15-Further-Resources.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a> # Further Resources ## Matplotlib Resources A single chapter in a book can never hope to cover all the available features and plot types available in Matplotlib. As with other packages we've seen, liberal use of IPython's tab-completion and help functions (see [Help and Documentation in IPython](01.01-Help-And-Documentation.ipynb)) can be very helpful when exploring Matplotlib's API. In addition, Matplotlib’s [online documentation](http://matplotlib.org/) can be a helpful reference. See in particular the [Matplotlib gallery](http://matplotlib.org/gallery.html) linked on that page: it shows thumbnails of hundreds of different plot types, each one linked to a page with the Python code snippet used to generate it. In this way, you can visually inspect and learn about a wide range of different plotting styles and visualization techniques. For a book-length treatment of Matplotlib, I would recommend [*Interactive Applications Using Matplotlib*](https://www.packtpub.com/application-development/interactive-applications-using-matplotlib), written by Matplotlib core developer Ben Root. ## Other Python Graphics Libraries Although Matplotlib is the most prominent Python visualization library, there are other more modern tools that are worth exploring as well. I'll mention a few of them briefly here: - [Bokeh](http://bokeh.pydata.org) is a JavaScript visualization library with a Python frontend that creates highly interactive visualizations capable of handling very large and/or streaming datasets. The Python front-end outputs a JSON data structure that can be interpreted by the Bokeh JS engine. - [Plotly](http://plot.ly) is the eponymous open source product of the Plotly company, and is similar in spirit to Bokeh. Because Plotly is the main product of a startup, it is receiving a high level of development effort. Use of the library is entirely free. - [Vispy](http://vispy.org/) is an actively developed project focused on dynamic visualizations of very large datasets. Because it is built to target OpenGL and make use of efficient graphics processors in your computer, it is able to render some quite large and stunning visualizations. - [Vega](https://vega.github.io/) and [Vega-Lite](https://vega.github.io/vega-lite) are declarative graphics representations, and are the product of years of research into the fundamental language of data visualization. 
The reference rendering implementation is JavaScript, but the API is language agnostic. There is a Python API under development in the [Altair](https://altair-viz.github.io/) package. Though as of summer 2016 it's not yet fully mature, I'm quite excited for the possibilities of this project to provide a common reference point for visualization in Python and other languages. The visualization space in the Python community is very dynamic, and I fully expect this list to be out of date as soon as it is published. Keep an eye out for what's coming in the future! <!--NAVIGATION--> < [Visualization with Seaborn](04.14-Visualization-With-Seaborn.ipynb) | [Contents](Index.ipynb) | [Machine Learning](05.00-Machine-Learning.ipynb) > <a href="https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/04.15-Further-Resources.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a>
github_jupyter
# PyWRspice Wrapper Tutorial: Run simulation on remote SSH server #### Prerequisite: * You need to complete the *Tutorial.ipynb* notebook first. Here we assume you are already famililar with running PyWRspice on a local computer. ``` # Add pyWRspice location to system path, if you haven't run setup.py import sys sys.path.append("../") import numpy as np import logging, importlib from pyWRspice import script, simulation, remote import matplotlib.pyplot as plt %matplotlib inline logging.basicConfig(level=logging.WARNING) ``` ### 0. Set up a connection to an SSH server Assume you store login info into a variable ```ssh_login = (server_name, user_name, password)``` and specify local directory as ```local_dir```, and remote directory as ```remote_dir``` to store simulation related temporary files. Set up a handler ``` engine_remote = remote.WRWrapperSSH(ssh_login[0],ssh_login[1],ssh_login[2], local_dir=local_dir, remote_dir=remote_dir, command = "/usr/local/xictools/bin/wrspice") ``` ## 1. Run a WRspice script one time Let's try to run the same script from *Tutorial.ipynb*, this time on an SSH server. ``` script2 = """* Transient response of RLC circuit .tran 50p 100n * RLC model of a transmission line R1 1 2 0.1 L1 2 3 1n C1 3 0 {cap}p R2 3 0 1e3 * Load impedance Rload 3 0 50 * Pulse voltage source V1 1 0 pulse(0 1 1n 1n 1n {dur}n) * .control run set filetype=binary write {output_file} v(2) v(3) .endc """ ``` We then specify the values of ```cap``` and ```dur``` when execute the script with the ```run``` function, the same way we would do when running on local machine. ``` dat2 = engine_remote.run(script2,cap=30, dur=40) # Extract the data dat2 = dat2.to_array() ts = dat2[0] v2 = dat2[1] v3 = dat2[2] # Plot the data fig = plt.figure(figsize=(12,6)) plt.plot(ts*1e9, v2, label="v(2)") plt.plot(ts*1e9, v3, label="v(3)") plt.xlabel("Time [ns]") plt.ylabel("Voltage [V]") plt.legend() plt.show() ``` ## 2. Run WRspice script with multiple parametric values in parallel We can pass a list of values to one or more parameters and run them all in parallel, using multiprocessing, with the ```run_parallel()``` method, almost the same way as running on a local machine, except that we now have a few more options on how to handle the files. Comparing to its local version, the remote function ```run_parallel``` has 2 new options: * ```save_file```: The function create a series of files (to be explained later) on the local and remote machines, such as circuit files and output files (after execution). To remove these files, set ```save_file=True``` (default). * ```read_raw```: By default (True), the function will read the output raw files into memory. If the output data can be too large, consider set ```read_raw=False```, then the returned value is the list of output filenames (to be manually imported later). One can control how the function returns by the parameter ```reshape```: If True, return (params,values) whose shapes are the same (same as the local version). If False, return a pandas DataFrame object containing the params and results in the column ```result```. ``` # Read the docs engine_remote.run_parallel? ``` #### Simple case: The same way as local version. ``` params = {} params["cap"] = [20,50,100] params["dur"] = [40,60] params3, dat3 = engine_remote.run_parallel(script2,save_file=False,**params) ``` Because ```reshape=True``` by default, the returned values are the same as in the local case. 
``` # Examine the returned parameter values for k,v in params3.items(): print("%s = %s" %(k,v)) print("") # Get the shape of the returned data dat3.shape # Plot the data fig = plt.figure(figsize=(12,6)) shape = dat3.shape for i in range(shape[0]): for j in range(shape[1]): dat = dat3[i,j].to_array() ts = dat[0] v3 = dat[2] plt.plot(ts*1e9, v3, label="cap=%s[pF], dur=%s[ns]" %(params3["cap"][i,j],params3["dur"][i,j])) plt.xlabel("Time [ns]") plt.ylabel("Voltage [V]") plt.legend() plt.show() ``` #### A more controlled case: turn off ```reshape``` and ```read_raw``` ``` params = {} params["cap"] = [20,50,100] params["dur"] = [40,60] dat4 = engine_remote.run_parallel(script2,save_file=False,reshape=False,read_raw=False,**params) ``` Because in this case ```reshape=False```, the returned value is a pandas DataFrame with all the simulation parameters and output. ``` dat4 ``` Because ```read_raw=False```, the returned output is a list of output raw filenames. ``` for fname in dat4["result"]: print(fname) ``` So we need to do some extra steps to read the output data and reshape them. We can do so manually, or run the function ```reshape_result```. ``` params4, dat4r = engine_remote.reshape_results(dat4,params) # Examine the returned parameter values for k,v in params4.items(): print("%s = %s" %(k,v)) print("") # Get the shape of the returned data # Note that it is an array of output raw filenames dat4r.shape # Plot the data fig = plt.figure(figsize=(12,6)) shape = dat4r.shape for i in range(shape[0]): for j in range(shape[1]): dat = simulation.RawFile(dat4r[i,j]).to_array() # Need to import the raw file using RawFile class ts = dat[0] v3 = dat[2] plt.plot(ts*1e9, v3, label="cap=%s[pF], dur=%s[ns]" %(params3["cap"][i,j],params3["dur"][i,j])) plt.xlabel("Time [ns]") plt.ylabel("Voltage [V]") plt.legend() plt.show() ``` ## 3. Run long simulation on server The ways we have run the simulation so far are appropriate for rather light-weight simulation which is expected to be completed in an hour or so on the server. When running heavy simulation on the server, we want to have the simulation running while we can disconnect the SSH connection, then we can be back later to collect the output. The way to do so is to break up the function ```run_parallel``` into multiple steps: prepare the files needed for the simulation on the server, then manually execute the simulation, then collect the result. #### Prepare the files The function ```prepare_parallel``` creates local and remote copies of the circuit files. It returns a configuration file containing information for execution. If there are additional files needed for the simulation (e.g. input files), they have to be copied to the server by the function ```put```. ``` fconfig = engine_remote.prepare_parallel(script2,**params) print(fconfig) # Let's read the first line of fconfig. We can get the local path by function local_fname with open(engine_remote.local_fname(fconfig),'r') as f: print(f.readline()) ``` #### Execute As shown above, the command to execute the simulation is ```python run_parallel.py simconfig_20200107_173940.csv --processes=<num>```. How to do it (safely): 1. Manually SSH log in to the server 2. Change directory ```cd``` to the working directory ```engine_remote.remote_dir``` 3. Create a separate session by running the command ```screen``` (or ```screen -S <name>``` to specify the screen name) 4. 
Run the above command: ```python run_parallel.py simconfig_20200107_173940.csv``` with optional ```--processes=<num>``` (```num``` is the number of processes in parallel, default is 64) 5. Then hit ```Ctrl + a``` and ```d``` to detach from the screen session 6. Now you can disconnect from the SSH server. The job will continue in the background. #### Collect results Two ways to check if the job is done: * Manually log in to the server, change to ```remote_dir``` and check if the file ```finish_<fconfig>.txt``` exists (when the simulation is completed, it will create that file). * Run the function ```get_results``` to automatically check and collect the output files. ``` dat5 = engine_remote.get_results(fconfig,timeout=100,read_raw=False) ``` Because we set ```read_raw=False```, the returned ```dat5``` is the same as ```dat4``` above. We need to run extra steps to reshape and read the output. ``` params5, dat5r = engine_remote.reshape_results(dat5,params) # The results should be the same as params4 and dat4r above # Finally, after analyzing the results, we need to remove the temp files (circuit and output files, etc) engine_remote.remove_fconfig(fconfig, dest="both") # Set dest="local" or dest="remote" if necessary ```
github_jupyter
<table class="ee-notebook-buttons" align="left"> <td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/NAIP/ndwi_single.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td> <td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/NAIP/ndwi_single.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td> <td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/NAIP/ndwi_single.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td> </table> ## Install Earth Engine API and geemap Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`. The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet. **Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving). ``` # Installs geemap package import subprocess try: import geemap except ImportError: print('geemap package not installed. Installing ...') subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap']) # Checks whether this notebook is running on Google Colab try: import google.colab import geemap.eefolium as emap except: import geemap as emap # Authenticates and initializes Earth Engine import ee try: ee.Initialize() except Exception as e: ee.Authenticate() ee.Initialize() ``` ## Create an interactive map The default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py#L13) can be added using the `Map.add_basemap()` function. 
``` Map = emap.Map(center=[40,-100], zoom=4) Map.add_basemap('ROADMAP') # Add Google Map Map ``` ## Add Earth Engine Python script ``` # Add Earth Engine dataset ``` ## Display Earth Engine data layers ``` Map.addLayerControl() # This line is not needed for ipyleaflet-based Map. Map ```
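The dataset cell above is left as a placeholder in this notebook. As a rough sketch of what a single-image NDWI computation over NAIP imagery could look like, the snippet below picks an arbitrary example point and date range (both are illustrative assumptions, not values from the original notebook) and uses the NAIP green and near-infrared bands, NDWI = (G - N) / (G + N).

```
# Sketch: single-image NDWI from NAIP (example location and dates are placeholders)
point = ee.Geometry.Point([-99.2182, 46.7824])
naip = ee.Image(
    ee.ImageCollection('USDA/NAIP/DOQQ')
    .filterBounds(point)
    .filterDate('2015-01-01', '2017-12-31')
    .first()
)
ndwi = naip.normalizedDifference(['G', 'N']).rename('NDWI')
Map.centerObject(point, 14)
Map.addLayer(naip, {'bands': ['N', 'R', 'G']}, 'NAIP false color')
Map.addLayer(ndwi, {'min': -1, 'max': 1, 'palette': ['00FFFF', '0000FF']}, 'NDWI')
Map
```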
github_jupyter
# The Fuzzing Book ## Sitemap While the chapters of this book can be read one after the other, there are many possible paths through the book. In this graph, an arrow _A_ → _B_ means that chapter _A_ is a prerequisite for chapter _B_. You can pick arbitrary paths in this graph to get to the topics that interest you most: ``` # ignore from IPython.display import SVG # ignore SVG(filename='PICS/Sitemap.svg') ``` ## [Table of Contents](index.ipynb) ### <a href="01_Intro.ipynb" title="Part I: Whetting Your Appetite (01_Intro)&#10;&#10;In this part, we introduce the topics of the book.">Part I: Whetting Your Appetite</a> * <a href="Tours.ipynb" title="Tours through the Book (Tours)&#10;&#10;This book is massive. With 17,000 lines of code and 125,000 words of text, a printed version would cover more than 1,000 pages of text. Obviously, we do not assume that everybody wants to read everything.">Tours through the Book</a> * <a href="Intro_Testing.ipynb" title="Introduction to Software Testing (Intro_Testing)&#10;&#10;Before we get to the central parts of the book, let us introduce essential concepts of software testing. Why is it necessary to test software at all? How does one test software? How can one tell whether a test has been successful? How does one know if one has tested enough? In this chapter, let us recall the most important concepts, and at the same time get acquainted with Python and interactive notebooks.">Introduction to Software Testing</a> ### <a href="02_Lexical_Fuzzing.ipynb" title="Part II: Lexical Fuzzing (02_Lexical_Fuzzing)&#10;&#10;This part introduces test generation at the lexical level, that is, composing sequences of characters.">Part II: Lexical Fuzzing</a> * <a href="Fuzzer.ipynb" title="Fuzzing: Breaking Things with Random Inputs (Fuzzer)&#10;&#10;In this chapter, we&#x27;ll start with one of the simplest test generation techniques. The key idea of random text generation, also known as fuzzing, is to feed a string of random characters into a program in the hope to uncover failures.">Fuzzing: Breaking Things with Random Inputs</a> * <a href="Coverage.ipynb" title="Code Coverage (Coverage)&#10;&#10;In the previous chapter, we introduced basic fuzzing – that is, generating random inputs to test programs. How do we measure the effectiveness of these tests? One way would be to check the number (and seriousness) of bugs found; but if bugs are scarce, we need a proxy for the likelihood of a test to uncover a bug. In this chapter, we introduce the concept of code coverage, measuring which parts of a program are actually executed during a test run. Measuring such coverage is also crucial for test generators that attempt to cover as much code as possible.">Code Coverage</a> * <a href="MutationFuzzer.ipynb" title="Mutation-Based Fuzzing (MutationFuzzer)&#10;&#10;Most randomly generated inputs are syntactically invalid and thus are quickly rejected by the processing program. To exercise functionality beyond input processing, we must increase chances to obtain valid inputs. One such way is so-called mutational fuzzing – that is, introducing small changes to existing inputs that may still keep the input valid, yet exercise new behavior. 
We show how to create such mutations, and how to guide them towards yet uncovered code, applying central concepts from the popular AFL fuzzer.">Mutation-Based Fuzzing</a> * <a href="GreyboxFuzzer.ipynb" title="Greybox Fuzzing (GreyboxFuzzer)&#10;&#10;In the previous chapter, we have introduced mutation-based fuzzing, a technique that generates fuzz inputs by applying small mutations to given inputs. In this chapter, we show how to guide these mutations towards specific goals such as coverage. The algorithms in this book stem from the popular American Fuzzy Lop (AFL) fuzzer, in particular from its AFLFast and AFLGo flavors. We will explore the greybox fuzzing algorithm behind AFL and how we can exploit it to solve various problems for automated vulnerability detection.">Greybox Fuzzing</a> * <a href="SearchBasedFuzzer.ipynb" title="Search-Based Fuzzing (SearchBasedFuzzer)&#10;&#10;Sometimes we are not only interested in fuzzing as many as possible diverse program inputs, but in deriving specific test inputs that achieve some objective, such as reaching specific statements in a program. When we have an idea of what we are looking for, then we can search for it. Search algorithms are at the core of computer science, but applying classic search algorithms like breadth or depth first search to search for tests is unrealistic, because these algorithms potentially require us to look at all possible inputs. However, domain-knowledge can be used to overcome this problem. For example, if we can estimate which of several program inputs is closer to the one we are looking for, then this information can guide us to reach the target quicker – this information is known as a heuristic. The way heuristics are applied systematically is captured in meta-heuristic search algorithms. The &quot;meta&quot; denotes that these algorithms are generic and can be instantiated differently to different problems. Meta-heuristics often take inspiration from processes observed in nature. For example, there are algorithms mimicking evolutionary processes, swarm intelligence, or chemical reactions. In general they are much more efficient than exhaustive search approaches such that they can be applied to vast search spaces – search spaces as vast as the domain of program inputs are no problem for them.">Search-Based Fuzzing</a> * <a href="MutationAnalysis.ipynb" title="Mutation Analysis (MutationAnalysis)&#10;&#10;In the chapter on coverage, we showed how one can identify which parts of the program are executed by a program, and hence get a sense of the effectiveness of a set of test cases in covering the program structure. However, coverage alone may not be the best measure for the effectiveness of a test, as one can have great coverage without ever checking a result for correctness. In this chapter, we introduce another means for assessing the effectiveness of a test suite: After injecting mutations – artificial faults – into the code, we check whether a test suite can detect these artificial faults. 
The idea is that if it fails to detect such mutations, it will also miss real bugs.">Mutation Analysis</a> ### <a href="03_Syntactical_Fuzzing.ipynb" title="Part III: Syntactical Fuzzing (03_Syntactical_Fuzzing)&#10;&#10;This part introduces test generation at the syntactical level, that is, composing inputs from language structures.">Part III: Syntactical Fuzzing</a> * <a href="Grammars.ipynb" title="Fuzzing with Grammars (Grammars)&#10;&#10;In the chapter on &quot;Mutation-Based Fuzzing&quot;, we have seen how to use extra hints – such as sample input files – to speed up test generation. In this chapter, we take this idea one step further, by providing a specification of the legal inputs to a program. Specifying inputs via a grammar allows for very systematic and efficient test generation, in particular for complex input formats. Grammars also serve as the base for configuration fuzzing, API fuzzing, GUI fuzzing, and many more.">Fuzzing with Grammars</a> * <a href="GrammarFuzzer.ipynb" title="Efficient Grammar Fuzzing (GrammarFuzzer)&#10;&#10;In the chapter on grammars, we have seen how to use grammars for very effective and efficient testing. In this chapter, we refine the previous string-based algorithm into a tree-based algorithm, which is much faster and allows for much more control over the production of fuzz inputs.">Efficient Grammar Fuzzing</a> * <a href="GrammarCoverageFuzzer.ipynb" title="Grammar Coverage (GrammarCoverageFuzzer)&#10;&#10;Producing inputs from grammars gives all possible expansions of a rule the same likelihood. For producing a comprehensive test suite, however, it makes more sense to maximize variety – for instance, by not repeating the same expansions over and over again. In this chapter, we explore how to systematically cover elements of a grammar such that we maximize variety and do not miss out individual elements.">Grammar Coverage</a> * <a href="Parser.ipynb" title="Parsing Inputs (Parser)&#10;&#10;In the chapter on Grammars, we discussed how grammars can be&#10;used to represent various languages. We also saw how grammars can be used to&#10;generate strings of the corresponding language. Grammars can also perform the&#10;reverse. That is, given a string, one can decompose the string into its&#10;constituent parts that correspond to the parts of grammar used to generate it&#10;– the derivation tree of that string. These parts (and parts from other similar&#10;strings) can later be recombined using the same grammar to produce new strings.">Parsing Inputs</a> * <a href="ProbabilisticGrammarFuzzer.ipynb" title="Probabilistic Grammar Fuzzing (ProbabilisticGrammarFuzzer)&#10;&#10;Let us give grammars even more power by assigning probabilities to individual expansions. This allows us to control how many of each element should be produced, and thus allows us to target our generated tests towards specific functionality. We also show how to learn such probabilities from given sample inputs, and specifically direct our tests towards input features that are uncommon in these samples.">Probabilistic Grammar Fuzzing</a> * <a href="GeneratorGrammarFuzzer.ipynb" title="Fuzzing with Generators (GeneratorGrammarFuzzer)&#10;&#10;In this chapter, we show how to extend grammars with functions – pieces of code that get executed during grammar expansion, and that can generate, check, or change elements produced. 
Adding functions to a grammar allows for very versatile test generation, bringing together the best of grammar generation and programming.">Fuzzing with Generators</a> * <a href="GreyboxGrammarFuzzer.ipynb" title="Greybox Fuzzing with Grammars (GreyboxGrammarFuzzer)&#10;&#10;&lt;!--&#10;Previously, we have learned about mutational fuzzing, which generates new inputs by mutating seed inputs. Most mutational fuzzers represent inputs as a sequence of bytes and apply byte-level mutations to this byte sequence. Such byte-level mutations work great for compact file formats with a small number of structural constraints. However, most file formats impose a high-level structure on these byte sequences.">Greybox Fuzzing with Grammars</a> * <a href="Reducer.ipynb" title="Reducing Failure-Inducing Inputs (Reducer)&#10;&#10;By construction, fuzzers create inputs that may be hard to read. This causes issues during debugging, when a human has to analyze the exact cause of the failure. In this chapter, we present techniques that automatically reduce and simplify failure-inducing inputs to a minimum in order to ease debugging.">Reducing Failure-Inducing Inputs</a> ### <a href="04_Semantical_Fuzzing.ipynb" title="Part IV: Semantical Fuzzing (04_Semantical_Fuzzing)&#10;&#10;This part introduces test generation techniques that take the semantics of the input into account, notably the behavior of the program that processes the input.">Part IV: Semantical Fuzzing</a> * <a href="GrammarMiner.ipynb" title="Mining Input Grammars (GrammarMiner)&#10;&#10;So far, the grammars we have seen have been mostly specified manually – that is, you (or the person knowing the input format) had to design and write a grammar in the first place. While the grammars we have seen so far have been rather simple, creating a grammar for complex inputs can involve quite some effort. In this chapter, we therefore introduce techniques that automatically mine grammars from programs – by executing the programs and observing how they process which parts of the input. In conjunction with a grammar fuzzer, this allows us to &#10;1. take a program, &#10;2. extract its input grammar, and &#10;3. fuzz it with high efficiency and effectiveness, using the concepts in this book.">Mining Input Grammars</a> * <a href="InformationFlow.ipynb" title="Tracking Information Flow (InformationFlow)&#10;&#10;We have explored how one could generate better inputs that can penetrate deeper into the program in question. While doing so, we have relied on program crashes to tell us that we have succeeded in finding problems in the program. However, that is rather simplistic. What if the behavior of the program is simply incorrect, but does not lead to a crash? Can one do better?">Tracking Information Flow</a> * <a href="ConcolicFuzzer.ipynb" title="Concolic Fuzzing (ConcolicFuzzer)&#10;&#10;We have previously seen how one can use dynamic taints to produce more intelligent test cases than simply looking for program crashes. We have also seen how one can use the taints to update the grammar, and hence focus more on the dangerous methods.">Concolic Fuzzing</a> * <a href="SymbolicFuzzer.ipynb" title="Symbolic Fuzzing (SymbolicFuzzer)&#10;&#10;One of the problems with traditional methods of fuzzing is that they fail to exercise all the possible behaviors that a system can have, especially when the input space is large. 
Quite often the execution of a specific branch of execution may happen only with very specific inputs, which could represent an extremely small fraction of the input space. The traditional fuzzing methods relies on chance to produce inputs they need. However, relying on randomness to generate values that we want is a bad idea when the space to be explored is huge. For example, a function that accepts a string, even if one only considers the first $10$ characters, already has $2^{80}$ possible inputs. If one is looking for a specific string, random generation of values will take a few thousand years even in one of the super computers.">Symbolic Fuzzing</a> * <a href="DynamicInvariants.ipynb" title="Mining Function Specifications (DynamicInvariants)&#10;&#10;When testing a program, one not only needs to cover its several behaviors; one also needs to check whether the result is as expected. In this chapter, we introduce a technique that allows us to mine function specifications from a set of given executions, resulting in abstract and formal descriptions of what the function expects and what it delivers.">Mining Function Specifications</a> ### <a href="05_Domain-Specific_Fuzzing.ipynb" title="Part V: Domain-Specific Fuzzing (05_Domain-Specific_Fuzzing)&#10;&#10;This part discusses test generation for a number of specific domains. For all these domains, we introduce fuzzers that generate inputs as well as miners that analyze the input structure.">Part V: Domain-Specific Fuzzing</a> * <a href="ConfigurationFuzzer.ipynb" title="Testing Configurations (ConfigurationFuzzer)&#10;&#10;The behavior of a program is not only governed by its data. The configuration of a program – that is, the settings that govern the execution of a program on its (regular) input data, as set by options or configuration files – just as well influences behavior, and thus can and should be tested. In this chapter, we explore how to systematically test and cover software configurations. By automatically inferring configuration options, we can apply these techniques out of the box, with no need for writing a grammar. Finally, we show how to systematically cover combinations of configuration options, quickly detecting unwanted interferences.">Testing Configurations</a> * <a href="APIFuzzer.ipynb" title="Fuzzing APIs (APIFuzzer)&#10;&#10;So far, we have always generated system input, i.e. data that the program as a whole obtains via its input channels. However, we can also generate inputs that go directly into individual functions, gaining flexibility and speed in the process. In this chapter, we explore the use of grammars to synthesize code for function calls, which allows you to generate program code that very efficiently invokes functions directly.">Fuzzing APIs</a> * <a href="Carver.ipynb" title="Carving Unit Tests (Carver)&#10;&#10;So far, we have always generated system input, i.e. data that the program as a whole obtains via its input channels. If we are interested in testing only a small set of functions, having to go through the system can be very inefficient. This chapter introduces a technique known as carving, which, given a system test, automatically extracts a set of unit tests that replicate the calls seen during the unit test. The key idea is to record such calls such that we can replay them later – as a whole or selectively. 
On top, we also explore how to synthesize API grammars from carved unit tests; this means that we can synthesize API tests without having to write a grammar at all.">Carving Unit Tests</a> * <a href="WebFuzzer.ipynb" title="Testing Web Applications (WebFuzzer)&#10;&#10;In this chapter, we explore how to generate tests for Graphical User Interfaces (GUIs), notably on Web interfaces. We set up a (vulnerable) Web server and demonstrate how to systematically explore its behavior – first with hand-written grammars, then with grammars automatically inferred from the user interface. We also show how to conduct systematic attacks on these servers, notably with code and SQL injection.">Testing Web Applications</a> * <a href="GUIFuzzer.ipynb" title="Testing Graphical User Interfaces (GUIFuzzer)&#10;&#10;In this chapter, we explore how to generate tests for Graphical User Interfaces (GUIs), abstracting from our previous examples on Web testing. Building on general means to extract user interface elements and to activate them, our techniques generalize to arbitrary graphical user interfaces, from rich Web applications to mobile apps, and systematically explore user interfaces through forms and navigation elements.">Testing Graphical User Interfaces</a> ### <a href="06_Managing_Fuzzing.ipynb" title="Part VI: Managing Fuzzing (06_Managing_Fuzzing)&#10;&#10;This part discusses how to manage fuzzing in the large.">Part VI: Managing Fuzzing</a> * <a href="FuzzingInTheLarge.ipynb" title="Fuzzing in the Large (FuzzingInTheLarge)&#10;&#10;In the past chapters, we have always looked at fuzzing taking place on one machine for a few seconds only. In the real world, however, fuzzers are run on dozens or even thousands of machines; for hours, days and weeks; for one program or dozens of programs. In such contexts, one needs an infrastructure to collect failure data from the individual fuzzer runs, and to aggregate such data in a central repository. In this chapter, we will examine such an infrastructure, the FuzzManager framework from Mozilla.">Fuzzing in the Large</a> * <a href="WhenToStopFuzzing.ipynb" title="When To Stop Fuzzing (WhenToStopFuzzing)&#10;&#10;In the past chapters, we have discussed several fuzzing techniques. Knowing what to do is important, but it is also important to know when to stop doing things. In this chapter, we will learn when to stop fuzzing – and use a prominent example for this purpose: The Enigma machine that was used in the second world war by the navy of Nazi Germany to encrypt communications, and how Alan Turing and I.J. Good used fuzzing techniques to crack ciphers for the Naval Enigma machine.">When To Stop Fuzzing</a> ### <a href="99_Appendices.ipynb" title="Appendices (99_Appendices)&#10;&#10;This part holds notebooks and modules that support other notebooks.">Appendices</a> * <a href="PrototypingWithPython.ipynb" title="Prototyping with Python (PrototypingWithPython)&#10;&#10;This is the manuscript of Andreas Zeller&#x27;s keynote&#10;&quot;Coding Effective Testing Tools Within Minutes&quot; at the TAIC PART 2020 conference.">Prototyping with Python</a> * <a href="ExpectError.ipynb" title="Error Handling (ExpectError)&#10;&#10;The code in this notebook helps with handling errors. Normally, an error in notebook code causes the execution of the code to stop; while an infinite loop in notebook code causes the notebook to run without end. 
This notebook provides two classes to help address these concerns.">Error Handling</a> * <a href="Timer.ipynb" title="Timer (Timer)&#10;&#10;The code in this notebook helps with measuring time.">Timer</a> * <a href="ControlFlow.ipynb" title="Control Flow Graph (ControlFlow)&#10;&#10;The code in this notebook helps with obtaining the control flow graph of python functions.">Control Flow Graph</a> * <a href="RailroadDiagrams.ipynb" title="Railroad Diagrams (RailroadDiagrams)&#10;&#10;The code in this notebook helps with drawing syntax-diagrams. It is a (slightly customized) copy of the excellent library from Tab Atkins jr., which unfortunately is not available as a Python package.">Railroad Diagrams</a>
github_jupyter
``` import h5py import numpy as np import matplotlib.pyplot as plt plt.style.use('presentation') from shabanipy.jj.plotting_general import plot_inplane_vs_bias, plot_inplane_vs_Ic_Rn, plot_inplane_vs_IcRn #: Name of the sample that must appear in the measurement name usually of the form "{Wafer}-{Piece}_{Design}-{Iteration}_{Junction}_{Cooldown}" SAMPLE_NAME = "{Wafer}-{Piece}_{Design}-{Iteration}" SAMPLE_ID = "{Wafer}-{Piece}_{Design}-{Iteration}_{Junction}_{Cooldown}" #: hdf5 file number FILE_NUM = '' #: Path to store generated files PATH = (f"/Users/bh/Desktop/Code/Topological JJ/Samples/{SAMPLE_NAME}/{SAMPLE_ID}") #: Name of generated processed data file PROCESSED_DATA_NAME = (f"{PATH}/Data/{SAMPLE_ID}_processed-data-{FILE_NUM}.hdf5") h = h5py.File(PROCESSED_DATA_NAME, 'r') # field_y = 'In-plane Field - Y field_z = 'In-plane Field - Z' vg = 'Vg::' f = h['Data'][f'{field_z}'][f'{vg}'] #[f'{field_y}'] in_field = np.array(f['Vector Magnet - Field Y']) v_drop = np.array(f["Voltage Drop"]) scaled_v_drop = np.array(f["ScaledVoltage"]) bias = np.array(f["Bias"]) dVdI = np.diff(np.array(f["ScaledVoltage"]))/np.diff(np.array(f["Bias"])) dR = np.array(f["dR"]) plot_inplane_vs_bias(in_field, bias, np.abs(dR) # savgol_windowl = 3, savgol_polyorder = 1, # cvmax = , cvmin = , # bias_limits = , # in_field_limits = , # fig_size = , ) plt.savefig(f"Figs/In-plane Field/inplane_vs_bias__{SAMPLE_NAME}_field-z:{field_z[16:]}_Vg:{vg[4:]}_{FILE_NUM}.pdf", dpi = 400, bbox_inches = 'tight') # plt.savefig(f"Figs/In-plane Field/inplane_vs_bias__{SAMPLE_NAME}_field-y:{field_y[16:]}_Vg:{vg[4:]}_{FILE_NUM}.pdf", dpi = 400, bbox_inches = 'tight') """Voltage threshold in V above which the junction is not considered to carry a supercurrent anymore. Used in the determination of the critical current. Usually of the order of a couple e-5 or e-4. Default is 1e-4.""" ic_voltage_threshold = """Positive bias value above which the data can be used to extract the normal resistance. Default is 10e-6.""" high_bias_threshold = plot_inplane_vs_Ic_Rn(in_field, bias, scaled_v_drop, ic_voltage_threshold = ic_voltage_threshold, high_bias_threshold = high_bias_threshold, # savgol_windowl = 3, savgol_polyorder = 1, # ic_limits = , # rn_limits = , # in_field_limits = , # fig_size = , ) plt.savefig(f"Figs/In-plane Field/inplane_vs_Ic_Rn__{SAMPLE_NAME}_field-z:{field_z[16:]}_Vg:{vg[4:]}_{FILE_NUM}.pdf", dpi = 400, bbox_inches = 'tight') # plt.savefig(f"Figs/In-plane Field/inplane_vs_Ic_Rn__{SAMPLE_NAME}_field-y:{field_y[16:]}_Vg:{vg[4:]}_{FILE_NUM}.pdf", dpi = 400, bbox_inches = 'tight') plot_inplane_vs_IcRn(in_field, bias, scaled_v_drop, ic_voltage_threshold = ic_voltage_threshold, high_bias_threshold = high_bias_threshold, # savgol_windowl = 3, savgol_polyorder = 1, # icrn_limits = , # in_field_limits = , # fig_size = ,) plt.savefig(f"Figs/In-plane Field/inplane_vs_IcRn__{SAMPLE_NAME}_field-z:{field_z[16:]}_Vg:{vg[4:]}_{FILE_NUM}.pdf", dpi = 400, bbox_inches = 'tight') # plt.savefig(f"Figs/In-plane Field/inplane_vs_IcRn__{SAMPLE_NAME}_field-y:{field_y[16:]}_Vg:{vg[4:]}_{FILE_NUM}.pdf", dpi = 400, bbox_inches = 'tight') ```
github_jupyter
# WOR Forecasting In this section is introduced the basic classes and functions to make Forecast by applying the Wor Methodology ``` import os from dcapy import dca from datetime import date import numpy as np ``` The WOR forecasting is an empirical method to estimate the trend of the water production with respect the cumulative oil production. Generally you can determine the WOR (Water-Oil Ratio) vs the Np (Cumulative Oil Production) linear relationship on a semi-log plot when preducing at a constant rate of total fluids. $ WOR = \frac{q_{w}}{q_{o}} $ ## Simple Functions to convert Bsw to Wor ``` list_bsw = [0.01,0.01,0.1,0.5,0.8,0.9,0.95,0.99] list_wor = dca.bsw_to_wor(list_bsw) dca.wor_to_bsw(list_wor) ``` ## Wor Forecasting function The parameters required to define a WOR model are: + **Slope**: It is the relationship between the WOR and Np. It is defined as $\frac{d(log(WOR))}{d Np}$ + **Fluid Rate**: Total fluid rate production target + **Ti**: Initial Time + **WOR initial**: The Wor value at the initial time ``` time1 = np.arange(0,10,1) slope = 3e-6 bswi = 0.5 wori = dca.bsw_to_wor(bswi) fluid_rate = [5000]*10 f1 = dca.wor_forecast(time1,fluid_rate,slope,wori) print(f1) ``` In this case you have to pass an array with the desired rate whose length be equal to the time array. That means you can pass a fluid rate array with different values. ``` time1 = np.arange(0,10,1) slope = 3e-5 bswi = 0.5 wori = dca.bsw_to_wor(bswi) fluid_rate = [5000]*5 + [6000]*5 f1 = dca.wor_forecast(time1,fluid_rate,slope,wori) print(f1) ``` ## Wor Class Like Arps class, the Wor class have the same advantages described before. In this case you can pass the initial bsw directly so it internally will convert it to WOR value. ``` bsw = 0.5 slope = 3.5e-6 ti = 0 fluid = 1000 w1 = dca.Wor(bsw=bsw,slope=slope,ti=ti, fluid_rate = fluid) print(type(w1)) ``` The forecast method is also present with the same parameters as seen in Arps class ``` fr = w1.forecast( start = 0, end = 5, ) print(fr) ``` If you want to change the fluid rate you can pass a different value when calling the `forecast` method ``` fr = w1.forecast( start = 0, end = 10, fluid_rate = 2000 ) print(fr) ``` ## Multiple Values You can create Wor instances with multiple values on each of the parameters. This will create additional iterations accorging with the number of cases and the broadcast shape ``` bsw = [0.4,0.5,0.6] slope = 3.5e-6 ti = 0 fluid = 1000 w2 = dca.Wor(bsw=bsw,slope=slope,ti=ti, fluid_rate = fluid) fr = w2.forecast( start = 0, end = 4, fluid_rate = 2000 ) print(fr) ``` As the each case of fluid rate can be an array with multiple values, you can pass a 2D array to make more than one iteration. ``` bsw = 0.4 slope = 3.5e-6 ti = 0 fluid = [[1000],[2000]] w3 = dca.Wor(bsw=bsw,slope=slope,ti=ti, fluid_rate = fluid) fr = w3.forecast( start = 0, end = 4, ) print(fr) bsw = 0.4 slope = 3.5e-6 ti = 0 fluid = [[1000,1200,1300,1250],[2000,2200,2300,2250]] w4 = dca.Wor(bsw=bsw,slope=slope,ti=ti, fluid_rate = fluid) fr = w4.forecast( start = 0, end = 4, ) print(fr) ``` ## Wor with Dates ``` w1 = dca.Wor( bsw = 0.5, slope = 3e-5, fluid_rate = 4000, ti=date(2021,1,1) ) print(w1) fr = w1.forecast(start=date(2021,1,1),end=date(2021,1,10),freq_output='D') print(fr) fr = w1.forecast(start=date(2021,1,1),end=date(2022,1,1),freq_output='M') print(fr) fr = w1.forecast(start=date(2021,1,1),end=date(2024,1,1),freq_output='A') print(fr) ```
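Since WOR = qw/qo and the total fluid rate is qf = qo + qw, the oil and water rates implied by a given WOR follow directly as qo = qf / (1 + WOR) and qw = qf - qo, with the water cut bsw = WOR / (1 + WOR). The short sketch below (example numbers only, independent of the forecast helpers above) shows the conversion with plain NumPy.

```
# Sketch: split a total fluid rate into oil and water rates for given WOR values (example numbers)
import numpy as np

wor = np.array([0.25, 1.0, 4.0, 9.0])   # water-oil ratios
qf = 5000.0                             # total fluid rate, bbl/d
qo = qf / (1.0 + wor)                   # oil rate
qw = qf - qo                            # water rate
bsw = wor / (1.0 + wor)                 # equivalent water cut
print(qo, qw, bsw)
```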
github_jupyter
``` import matplotlib.pyplot as plt import matplotlib.image as mpimg import numpy as np import cv2 from numpy import array origin_image=mpimg.imread("canny-edge-detection-test.jpg") plt.figure() # plt.subplot(1,3,1) # plt.imshow(image) image=array(origin_image) ysize = image.shape[0] xsize = image.shape[1] left_bottom = [10,540] right_bottom = [900,540] apex = [480, 280] # Fit lines (y=Ax+B) to identify the 3 sided region of interest # np.polyfit() returns the coefficients [A, B] of the fit fit_left = np.polyfit((left_bottom[0], apex[0]), (left_bottom[1], apex[1]), 1) fit_right = np.polyfit((right_bottom[0], apex[0]), (right_bottom[1], apex[1]), 1) fit_bottom = np.polyfit((left_bottom[0], right_bottom[0]), (left_bottom[1], right_bottom[1]), 1) # Find the region inside the lines XX, YY = np.meshgrid(np.arange(0, xsize), np.arange(0, ysize)) region_thresholds = (YY > (XX*fit_left[0] + fit_left[1])) & \ (YY > (XX*fit_right[0] + fit_right[1])) & \ (YY < (XX*fit_bottom[0] + fit_bottom[1])) gray_image=cv2.cvtColor(image,cv2.COLOR_RGB2GRAY) # plt.subplot(1,3,2) # plt.imshow(gray_image) # Define a kernel size and apply Gaussian smoothing kernel_size = 5 blur_gray = cv2.GaussianBlur(gray_image,(kernel_size, kernel_size),0) # Define our parameters for Canny and apply low_threshold = 50 high_threshold = 150 edges = cv2.Canny(blur_gray, low_threshold, high_threshold) edges[~region_thresholds] = False # plt.subplot(1,3,3) # plt.imshow(edges) # Define the Hough transform parameters # Make a blank the same size as our image to draw on rho = 1 theta = np.pi/180 threshold = 15 min_line_length = 40 max_line_gap = 20 line_image = np.copy(image)*0 #creating a blank to draw lines on # Run Hough on edge detected image lines = cv2.HoughLinesP(edges, rho, theta, threshold, np.array([]), min_line_length, max_line_gap) # Iterate over the output "lines" and draw lines on the blank for line in lines: for x1,y1,x2,y2 in line: cv2.line(line_image,(x1,y1),(x2,y2),(255,0,0),10) # Create a "color" binary image to combine with line image color_edges = np.dstack((edges, edges, edges)) # Draw the lines on the edge image combo = cv2.addWeighted(color_edges, 0.8, line_image, 1, 0) plt.imshow(combo) import matplotlib.pyplot as plt import matplotlib.image as mpimg import numpy as np import cv2 # Read in and grayscale the image # Note: in the previous example we were reading a .jpg # Here we read a .png and convert to 0,255 bytescale image = mpimg.imread("canny-edge-detection-test.jpg") gray = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY) # Define a kernel size and apply Gaussian smoothing kernel_size = 5 blur_gray = cv2.GaussianBlur(gray,(kernel_size, kernel_size),0) # Define our parameters for Canny and apply low_threshold = 50 high_threshold = 150 edges = cv2.Canny(blur_gray, low_threshold, high_threshold) # Next we'll create a masked edges image using cv2.fillPoly() mask = np.zeros_like(edges) ignore_mask_color = 255 # This time we are defining a four sided polygon to mask imshape = image.shape vertices = np.array([[(0,imshape[0]),(450, 290), (490, 290), (imshape[1],imshape[0])]], dtype=np.int32) #在全0的图像上,在指定区域内填入了255 cv2.fillPoly(mask, vertices, ignore_mask_color) #将原始图像与上面填充的图像进行按位与,感兴趣区域外的点会被置为0,感兴趣区域内的点的边沿点原本就是255,按位与之后还是255,其余点均为0 masked_edges = cv2.bitwise_and(edges, mask) #可以试试自行调整以下参数,看看都有什么神奇的效果 # Define the Hough transform parameters # Make a blank the same size as our image to draw on rho = 1 # distance resolution in pixels of the Hough grid theta = np.pi/180 # angular resolution in radians of the Hough grid threshold = 
15 # minimum number of votes (intersections in Hough grid cell)
min_line_length = 40 # minimum number of pixels making up a line
max_line_gap = 20    # maximum gap in pixels between connectable line segments
line_image = np.copy(image)*0 # creating a blank to draw lines on

# Run Hough on edge detected image
# Output "lines" is an array containing endpoints of detected line segments
lines = cv2.HoughLinesP(masked_edges, rho, theta, threshold, np.array([]),
                            min_line_length, max_line_gap)

# The output is only a collection of line-segment endpoints, so we have to connect
# these points to finally render the lines we want
# Iterate over the output "lines" and draw lines on a blank image
for line in lines:
    for x1,y1,x2,y2 in line:
        cv2.line(line_image,(x1,y1),(x2,y2),(255,0,0),10)

# Create a "color" binary image to combine with line image
# "edges" is only a 2D array with a single number per pixel, whereas a real image stores
# an [R, G, B] triple per pixel; to combine the edge image with the cv2.line output we
# first convert it into a real color image, which is what np.dstack is used for
color_edges = np.dstack((edges, edges, edges))

# Draw the lines on the edge image
lines_edges = cv2.addWeighted(color_edges, 0.8, line_image, 1, 0)
plt.imshow(lines_edges)
```
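The Hough output above still consists of many short segments. A common next step is to average and extrapolate them into a single left and right lane line. The sketch below is only an illustration: it assumes the `lines`, `image` and `edges` arrays from the cell above, separates segments by slope sign, and picks an example horizon value (`y_top = 320`) for the extrapolation.

```
# Sketch: merge Hough segments into one extrapolated line per lane side
left_pts, right_pts = [], []
for line in lines:
    for x1, y1, x2, y2 in line:
        if x2 == x1:
            continue  # skip vertical segments
        slope = (y2 - y1) / (x2 - x1)
        side = left_pts if slope < 0 else right_pts
        side.append((x1, y1))
        side.append((x2, y2))

lane_image = np.copy(image) * 0
y_bottom, y_top = image.shape[0], 320  # y_top is an example horizon value
for pts in (left_pts, right_pts):
    if len(pts) < 2:
        continue
    xs, ys = zip(*pts)
    fit = np.polyfit(ys, xs, 1)          # fit x = m*y + b, convenient for fixed y limits
    x_bottom = int(np.polyval(fit, y_bottom))
    x_top = int(np.polyval(fit, y_top))
    cv2.line(lane_image, (x_bottom, y_bottom), (x_top, y_top), (255, 0, 0), 10)

plt.imshow(cv2.addWeighted(np.dstack((edges, edges, edges)), 0.8, lane_image, 1, 0))
```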
github_jupyter
# Bayesian Regression Using NumPyro In this tutorial, we will explore how to do bayesian regression in NumPyro, using a simple example adapted from Statistical Rethinking [[1](#References)]. In particular, we would like to explore the following: - Write a simple model using the `sample` NumPyro primitive. - Run inference using MCMC in NumPyro, in particular, using the No U-Turn Sampler (NUTS) to get a posterior distribution over our regression parameters of interest. - Learn about inference utilities such as `Predictive` and `log_likelihood`. - Learn how we can use effect-handlers in NumPyro to generate execution traces from the model, condition on sample statements, seed models with RNG seeds, etc., and use this to implement various utilities that will be useful for MCMC. e.g. computing model log likelihood, generating empirical distribution over the posterior predictive, etc. ## Tutorial Outline: 1. [Dataset](#Dataset) 2. [Regression Model to Predict Divorce Rate](#Regression-Model-to-Predict-Divorce-Rate) - [Model-1: Predictor-Marriage Rate](#Model-1:-Predictor---Marriage-Rate) - [Posterior Distribution over the Regression Parameters](#Posterior-Distribution-over-the-Regression-Parameters) - [Posterior Predictive Distribution](#Posterior-Predictive-Distribution) - [Predictive Utility With Effect Handlers](#Predictive-Utility-With-Effect-Handlers) - [Model Predictive Density](#Model-Predictive-Density) - [Model-2: Predictor-Median Age of Marriage](#Model-2:-Predictor---Median-Age-of-Marriage) - [Model-3: Predictor-Marriage Rate and Median Age of Marriage](#Model-3:-Predictor---Marriage-Rate-and-Median-Age-of-Marriage) - [Divorce Rate Residuals by State](#Divorce-Rate-Residuals-by-State) 3. [Regression Model with Measurement Error](#Regression-Model-with-Measurement-Error) - [Effect of Incorporating Measurement Noise on Residuals](#Effect-of-Incorporating-Measurement-Noise-on-Residuals) 4. [References](#References) ``` !pip install -q numpyro@git+https://github.com/pyro-ppl/numpyro import os from IPython.display import set_matplotlib_formats import jax.numpy as jnp from jax import random, vmap from jax.scipy.special import logsumexp import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import numpyro from numpyro.diagnostics import hpdi import numpyro.distributions as dist from numpyro import handlers from numpyro.infer import MCMC, NUTS plt.style.use("bmh") if "NUMPYRO_SPHINXBUILD" in os.environ: set_matplotlib_formats("svg") assert numpyro.__version__.startswith("0.8.0") ``` ## Dataset For this example, we will use the `WaffleDivorce` dataset from Chapter 05, Statistical Rethinking [[1](#References)]. The dataset contains divorce rates in each of the 50 states in the USA, along with predictors such as population, median age of marriage, whether it is a Southern state and, curiously, number of Waffle Houses. ``` DATASET_URL = "https://raw.githubusercontent.com/rmcelreath/rethinking/master/data/WaffleDivorce.csv" dset = pd.read_csv(DATASET_URL, sep=";") dset ``` Let us plot the pair-wise relationship amongst the main variables in the dataset, using `seaborn.pairplot`. ``` vars = [ "Population", "MedianAgeMarriage", "Marriage", "WaffleHouses", "South", "Divorce", ] sns.pairplot(dset, x_vars=vars, y_vars=vars, palette="husl"); ``` From the plots above, we can clearly observe that there is a relationship between divorce rates and marriage rates in a state (as might be expected), and also between divorce rates and median age of marriage. 
There is also a weak relationship between number of Waffle Houses and divorce rates, which is not obvious from the plot above, but will be clearer if we regress `Divorce` against `WaffleHouse` and plot the results. ``` sns.regplot(x="WaffleHouses", y="Divorce", data=dset); ``` This is an example of a spurious association. We do not expect the number of Waffle Houses in a state to affect the divorce rate, but it is likely correlated with other factors that have an effect on the divorce rate. We will not delve into this spurious association in this tutorial, but the interested reader is encouraged to read Chapters 5 and 6 of [[1](#References)] which explores the problem of causal association in the presence of multiple predictors. For simplicity, we will primarily focus on marriage rate and the median age of marriage as our predictors for divorce rate throughout the remaining tutorial. ## Regression Model to Predict Divorce Rate Let us now write a regressionn model in *NumPyro* to predict the divorce rate as a linear function of marriage rate and median age of marriage in each of the states. First, note that our predictor variables have somewhat different scales. It is a good practice to standardize our predictors and response variables to mean `0` and standard deviation `1`, which should result in [faster inference](https://mc-stan.org/docs/2_19/stan-users-guide/standardizing-predictors-and-outputs.html). ``` standardize = lambda x: (x - x.mean()) / x.std() dset["AgeScaled"] = dset.MedianAgeMarriage.pipe(standardize) dset["MarriageScaled"] = dset.Marriage.pipe(standardize) dset["DivorceScaled"] = dset.Divorce.pipe(standardize) ``` We write the NumPyro model as follows. While the code should largely be self-explanatory, take note of the following: - In NumPyro, *model* code is any Python callable which can optionally accept additional arguments and keywords. For HMC which we will be using for this tutorial, these arguments and keywords remain static during inference, but we can reuse the same model to generate [predictions](#Posterior-Predictive-Distribution) on new data. - In addition to regular Python statements, the model code also contains primitives like `sample`. These primitives can be interpreted with various side-effects using effect handlers. For more on effect handlers, refer to [[3](#References)], [[4](#References)]. For now, just remember that a `sample` statement makes this a stochastic function that samples some latent parameters from a *prior distribution*. Our goal is to infer the *posterior distribution* of these parameters conditioned on observed data. - The reason why we have kept our predictors as optional keyword arguments is to be able to reuse the same model as we vary the set of predictors. Likewise, the reason why the response variable is optional is that we would like to reuse this model to sample from the posterior predictive distribution. See the [section](#Posterior-Predictive-Distribution) on plotting the posterior predictive distribution, as an example. 
``` def model(marriage=None, age=None, divorce=None): a = numpyro.sample("a", dist.Normal(0.0, 0.2)) M, A = 0.0, 0.0 if marriage is not None: bM = numpyro.sample("bM", dist.Normal(0.0, 0.5)) M = bM * marriage if age is not None: bA = numpyro.sample("bA", dist.Normal(0.0, 0.5)) A = bA * age sigma = numpyro.sample("sigma", dist.Exponential(1.0)) mu = a + M + A numpyro.sample("obs", dist.Normal(mu, sigma), obs=divorce) ``` ### Model 1: Predictor - Marriage Rate We first try to model the divorce rate as depending on a single variable, marriage rate. As mentioned above, we can use the same `model` code as earlier, but only pass values for `marriage` and `divorce` keyword arguments. We will use the No U-Turn Sampler (see [[5](#References)] for more details on the NUTS algorithm) to run inference on this simple model. The Hamiltonian Monte Carlo (or, the NUTS) implementation in NumPyro takes in a potential energy function. This is the negative log joint density for the model. Therefore, for our model description above, we need to construct a function which given the parameter values returns the potential energy (or negative log joint density). Additionally, the verlet integrator in HMC (or, NUTS) returns sample values simulated using Hamiltonian dynamics in the unconstrained space. As such, continuous variables with bounded support need to be transformed into unconstrained space using bijective transforms. We also need to transform these samples back to their constrained support before returning these values to the user. Thankfully, this is handled on the backend for us, within a convenience class for doing [MCMC inference](https://numpyro.readthedocs.io/en/latest/mcmc.html#numpyro.mcmc.MCMC) that has the following methods: - `run(...)`: runs warmup, adapts steps size and mass matrix, and does sampling using the sample from the warmup phase. - `print_summary()`: print diagnostic information like quantiles, effective sample size, and the Gelman-Rubin diagnostic. - `get_samples()`: gets samples from the posterior distribution. Note the following: - JAX uses functional PRNGs. Unlike other languages / frameworks which maintain a global random state, in JAX, every call to a sampler requires an [explicit PRNGKey](https://github.com/google/jax#random-numbers-are-different). We will split our initial random seed for subsequent operations, so that we do not accidentally reuse the same seed. - We run inference with the `NUTS` sampler. To run vanilla HMC, we can instead use the [HMC](https://numpyro.readthedocs.io/en/latest/mcmc.html#numpyro.mcmc.HMC) class. ``` # Start from this source of randomness. We will split keys for subsequent operations. rng_key = random.PRNGKey(0) rng_key, rng_key_ = random.split(rng_key) # Run NUTS. kernel = NUTS(model) num_samples = 2000 mcmc = MCMC(kernel, num_warmup=1000, num_samples=num_samples) mcmc.run( rng_key_, marriage=dset.MarriageScaled.values, divorce=dset.DivorceScaled.values ) mcmc.print_summary() samples_1 = mcmc.get_samples() ``` #### Posterior Distribution over the Regression Parameters We notice that the progress bar gives us online statistics on the acceptance probability, step size and number of steps taken per sample while running NUTS. In particular, during warmup, we adapt the step size and mass matrix to achieve a certain target acceptance probability which is 0.8, by default. We were able to successfully adapt our step size to achieve this target in the warmup phase. 
During warmup, the aim is to adapt hyper-parameters such as step size and mass matrix (the HMC algorithm is very sensitive to these hyper-parameters), and to reach the typical set (see [[6](#References)] for more details). If there are any issues in the model specification, the first signal to notice would be low acceptance probabilities or a very high number of steps. We use the sample from the end of the warmup phase to seed the MCMC chain (denoted by the second `sample` progress bar) from which we generate the desired number of samples from our target distribution.

At the end of inference, NumPyro prints the mean, std and 90% CI values for each of the latent parameters. Note that since we standardized our predictors and response variable, we would expect the intercept to have mean 0, as can be seen here. It also prints other convergence diagnostics on the latent parameters in the model, including the [effective sample size](https://numpyro.readthedocs.io/en/latest/diagnostics.html#numpyro.diagnostics.effective_sample_size) and the [Gelman-Rubin diagnostic](https://numpyro.readthedocs.io/en/latest/diagnostics.html#numpyro.diagnostics.gelman_rubin) ($\hat{R}$). The value for these diagnostics indicates that the chain has converged to the target distribution. In our case, the "target distribution" is the posterior distribution over the latent parameters that we are interested in. Note that this is often worth verifying with multiple chains for more complicated models. In the end, `samples_1` is a collection (in our case, a `dict` keyed by the names of the latent parameters) containing samples from the posterior distribution for each of the latent parameters in the model.

To look at our regression fit, let us plot the regression line using our posterior estimates for the regression parameters, along with the 90% Credibility Interval (CI). Note that the [hpdi](https://numpyro.readthedocs.io/en/latest/diagnostics.html#numpyro.diagnostics.hpdi) function in NumPyro's diagnostics module can be used to compute the CI. In the functions below, note that the collected samples from the posterior are all along the leading axis.

```
def plot_regression(x, y_mean, y_hpdi):
    # Sort values for plotting by x axis
    idx = jnp.argsort(x)
    marriage = x[idx]
    mean = y_mean[idx]
    hpdi = y_hpdi[:, idx]
    divorce = dset.DivorceScaled.values[idx]

    # Plot
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(6, 6))
    ax.plot(marriage, mean)
    ax.plot(marriage, divorce, "o")
    ax.fill_between(marriage, hpdi[0], hpdi[1], alpha=0.3, interpolate=True)
    return ax


# Compute empirical posterior distribution over mu
posterior_mu = (
    jnp.expand_dims(samples_1["a"], -1)
    + jnp.expand_dims(samples_1["bM"], -1) * dset.MarriageScaled.values
)

mean_mu = jnp.mean(posterior_mu, axis=0)
hpdi_mu = hpdi(posterior_mu, 0.9)
ax = plot_regression(dset.MarriageScaled.values, mean_mu, hpdi_mu)
ax.set(
    xlabel="Marriage rate", ylabel="Divorce rate", title="Regression line with 90% CI"
);
```

We can see from the plot that the CI broadens towards the tails where the data is relatively sparse, as can be expected.

#### Prior Predictive Distribution

Let us check that we have set sensible priors by sampling from the prior predictive distribution. NumPyro provides a handy [Predictive](http://num.pyro.ai/en/latest/utilities.html#numpyro.infer.util.Predictive) utility for this purpose.
```
from numpyro.infer import Predictive

rng_key, rng_key_ = random.split(rng_key)
prior_predictive = Predictive(model, num_samples=100)
prior_predictions = prior_predictive(rng_key_, marriage=dset.MarriageScaled.values)[
    "obs"
]

mean_prior_pred = jnp.mean(prior_predictions, axis=0)
hpdi_prior_pred = hpdi(prior_predictions, 0.9)

ax = plot_regression(dset.MarriageScaled.values, mean_prior_pred, hpdi_prior_pred)
ax.set(xlabel="Marriage rate", ylabel="Divorce rate", title="Predictions with 90% CI");
```

#### Posterior Predictive Distribution

Let us now look at the posterior predictive distribution to see how our predictive distribution looks with respect to the observed divorce rates. To get samples from the posterior predictive distribution, we need to run the model by substituting the latent parameters with samples from the posterior. Note that by default we generate a single prediction for each sample from the joint posterior distribution, but this can be controlled using the `num_samples` argument.

```
rng_key, rng_key_ = random.split(rng_key)
predictive = Predictive(model, samples_1)
predictions = predictive(rng_key_, marriage=dset.MarriageScaled.values)["obs"]
df = dset.filter(["Location"])
df["Mean Predictions"] = jnp.mean(predictions, axis=0)
df.head()
```

#### Predictive Utility With Effect Handlers

To remove the magic behind `Predictive`, let us see how we can combine [effect handlers](https://numpyro.readthedocs.io/en/latest/handlers.html) with the [vmap](https://github.com/google/jax#auto-vectorization-with-vmap) JAX primitive to implement our own simplified predictive utility function that can do vectorized predictions.

```
def predict(rng_key, post_samples, model, *args, **kwargs):
    model = handlers.seed(handlers.condition(model, post_samples), rng_key)
    model_trace = handlers.trace(model).get_trace(*args, **kwargs)
    return model_trace["obs"]["value"]


# vectorize predictions via vmap
predict_fn = vmap(
    lambda rng_key, samples: predict(
        rng_key, samples, model, marriage=dset.MarriageScaled.values
    )
)
```

Note the use of the `condition`, `seed` and `trace` effect handlers in the `predict` function.

- The `seed` effect-handler is used to wrap a stochastic function with an initial `PRNGKey` seed. When a sample statement inside the model is called, it uses the existing seed to sample from a distribution, but this effect-handler also splits the existing key to ensure that future `sample` calls in the model use the newly split key instead. This is to prevent us from having to explicitly pass in a `PRNGKey` to each `sample` statement in the model.
- The `condition` effect handler conditions the latent sample sites to certain values. In our case, we are conditioning on values from the posterior distribution returned by MCMC.
- The `trace` effect handler runs the model and records the execution trace within an `OrderedDict`. This trace object contains execution metadata that is useful for computing quantities such as the log joint density.

It should be clear now that the `predict` function simply runs the model by substituting the latent parameters with samples from the posterior (generated by the `mcmc` function) to generate predictions. Note the use of JAX's auto-vectorization transform called [vmap](https://github.com/google/jax#auto-vectorization-with-vmap) to vectorize predictions. Note that if we didn't use `vmap`, we would have to use a native Python for loop over each sample, which is much slower. Each draw from the posterior can be used to get predictions over all the 50 states.
When we vectorize this over all the samples from the posterior using `vmap`, we will get a `predictions_1` array of shape `(num_samples, 50)`. We can then compute the mean and 90% CI of these samples to plot the posterior predictive distribution. We note that our mean predictions match those obtained from the `Predictive` utility class.

```
# Using the same key as we used for Predictive - note that the results are identical.

predictions_1 = predict_fn(random.split(rng_key_, num_samples), samples_1)

mean_pred = jnp.mean(predictions_1, axis=0)
df = dset.filter(["Location"])
df["Mean Predictions"] = mean_pred
df.head()

hpdi_pred = hpdi(predictions_1, 0.9)

ax = plot_regression(dset.MarriageScaled.values, mean_pred, hpdi_pred)
ax.set(xlabel="Marriage rate", ylabel="Divorce rate", title="Predictions with 90% CI");
```

We have used the same `plot_regression` function as earlier. We notice that our CI for the predictive distribution is much broader as compared to the last plot due to the additional noise introduced by the `sigma` parameter. Most data points lie well within the 90% CI, which indicates a good fit.

#### Posterior Predictive Density

Likewise, making use of effect-handlers and `vmap`, we can also compute the log likelihood for this model given the dataset, and the log posterior predictive density [[8](#References)], which is given by

$$ \log \prod_{i=1}^{n} \int p(y_i \mid \theta) \, p_{post}(\theta) \, d\theta \approx \sum_{i=1}^n \log \frac{\sum_s p(y_i \mid \theta^{s})}{S} \\ = \sum_{i=1}^n \left( \log \sum_s p(y_i \mid \theta^{s}) - \log S \right) $$

Here, $i$ indexes the observed data points $y$, $s$ indexes the $S$ posterior samples over the latent parameters $\theta$, and $\theta^{s}$ denotes a single posterior sample. If the posterior predictive density for a model has a comparatively high value, it indicates that the observed data-points have higher probability under the given model.

```
def log_likelihood(rng_key, params, model, *args, **kwargs):
    model = handlers.condition(model, params)
    model_trace = handlers.trace(model).get_trace(*args, **kwargs)
    obs_node = model_trace["obs"]
    return obs_node["fn"].log_prob(obs_node["value"])


def log_pred_density(rng_key, params, model, *args, **kwargs):
    n = list(params.values())[0].shape[0]
    log_lk_fn = vmap(
        lambda rng_key, params: log_likelihood(rng_key, params, model, *args, **kwargs)
    )
    log_lk_vals = log_lk_fn(random.split(rng_key, n), params)
    return (logsumexp(log_lk_vals, 0) - jnp.log(n)).sum()
```

Note that NumPyro provides the [log_likelihood](http://num.pyro.ai/en/latest/utilities.html#log-likelihood) utility function that can be used directly for computing the `log likelihood` as in the first function above, for any general model. In this tutorial, we would like to emphasize that there is nothing magical about such utility functions, and you can roll out your own inference utilities using NumPyro's effect handling stack.

```
rng_key, rng_key_ = random.split(rng_key)
print(
    "Log posterior predictive density: {}".format(
        log_pred_density(
            rng_key_,
            samples_1,
            model,
            marriage=dset.MarriageScaled.values,
            divorce=dset.DivorceScaled.values,
        )
    )
)
```

### Model 2: Predictor - Median Age of Marriage

We will now model the divorce rate as a function of the median age of marriage. The computations are mostly a reproduction of what we did for Model 1. Notice the following:

- Divorce rate is inversely related to the age of marriage. Hence states where the median age of marriage is low will likely have a higher divorce rate.
- We get a higher log likelihood as compared to Model 1, indicating that median age of marriage is likely a much better predictor of divorce rate.

```
rng_key, rng_key_ = random.split(rng_key)

mcmc.run(rng_key_, age=dset.AgeScaled.values, divorce=dset.DivorceScaled.values)
mcmc.print_summary()
samples_2 = mcmc.get_samples()

posterior_mu = (
    jnp.expand_dims(samples_2["a"], -1)
    + jnp.expand_dims(samples_2["bA"], -1) * dset.AgeScaled.values
)

mean_mu = jnp.mean(posterior_mu, axis=0)
hpdi_mu = hpdi(posterior_mu, 0.9)
ax = plot_regression(dset.AgeScaled.values, mean_mu, hpdi_mu)
ax.set(
    xlabel="Median marriage age",
    ylabel="Divorce rate",
    title="Regression line with 90% CI",
);

rng_key, rng_key_ = random.split(rng_key)
predictions_2 = Predictive(model, samples_2)(rng_key_, age=dset.AgeScaled.values)["obs"]

mean_pred = jnp.mean(predictions_2, axis=0)
hpdi_pred = hpdi(predictions_2, 0.9)

ax = plot_regression(dset.AgeScaled.values, mean_pred, hpdi_pred)
ax.set(xlabel="Median Age", ylabel="Divorce rate", title="Predictions with 90% CI");

rng_key, rng_key_ = random.split(rng_key)
print(
    "Log posterior predictive density: {}".format(
        log_pred_density(
            rng_key_,
            samples_2,
            model,
            age=dset.AgeScaled.values,
            divorce=dset.DivorceScaled.values,
        )
    )
)
```

### Model 3: Predictor - Marriage Rate and Median Age of Marriage

Finally, we will also model divorce rate as depending on both marriage rate as well as the median age of marriage. Note that the model's posterior predictive density is similar to Model 2, which likely indicates that the marginal information from marriage rate in predicting divorce rate is low when the median age of marriage is already known.

```
rng_key, rng_key_ = random.split(rng_key)

mcmc.run(
    rng_key_,
    marriage=dset.MarriageScaled.values,
    age=dset.AgeScaled.values,
    divorce=dset.DivorceScaled.values,
)
mcmc.print_summary()
samples_3 = mcmc.get_samples()

rng_key, rng_key_ = random.split(rng_key)
print(
    "Log posterior predictive density: {}".format(
        log_pred_density(
            rng_key_,
            samples_3,
            model,
            marriage=dset.MarriageScaled.values,
            age=dset.AgeScaled.values,
            divorce=dset.DivorceScaled.values,
        )
    )
)
```

### Divorce Rate Residuals by State

The regression plots above show that the observed divorce rates for many states differ considerably from the mean regression line. To dig deeper into how the last model (Model 3) under-predicts or over-predicts for each of the states, we will plot the posterior predictive and residuals (`Observed divorce rate - Predicted divorce rate`) for each of the states.

```
# Predictions for Model 3.
rng_key, rng_key_ = random.split(rng_key)
predictions_3 = Predictive(model, samples_3)(
    rng_key_, marriage=dset.MarriageScaled.values, age=dset.AgeScaled.values
)["obs"]

y = jnp.arange(50)


fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(12, 16))
pred_mean = jnp.mean(predictions_3, axis=0)
pred_hpdi = hpdi(predictions_3, 0.9)
residuals_3 = dset.DivorceScaled.values - predictions_3
residuals_mean = jnp.mean(residuals_3, axis=0)
residuals_hpdi = hpdi(residuals_3, 0.9)
idx = jnp.argsort(residuals_mean)

# Plot posterior predictive
ax[0].plot(jnp.zeros(50), y, "--")
ax[0].errorbar(
    pred_mean[idx],
    y,
    xerr=pred_hpdi[1, idx] - pred_mean[idx],
    marker="o",
    ms=5,
    mew=4,
    ls="none",
    alpha=0.8,
)
ax[0].plot(dset.DivorceScaled.values[idx], y, marker="o", ls="none", color="gray")
ax[0].set(
    xlabel="Posterior Predictive (red) vs. Actuals (gray)",
    ylabel="State",
    title="Posterior Predictive with 90% CI",
)
ax[0].set_yticks(y)
ax[0].set_yticklabels(dset.Loc.values[idx], fontsize=10)

# Plot residuals
residuals_3 = dset.DivorceScaled.values - predictions_3
residuals_mean = jnp.mean(residuals_3, axis=0)
residuals_hpdi = hpdi(residuals_3, 0.9)
err = residuals_hpdi[1] - residuals_mean

ax[1].plot(jnp.zeros(50), y, "--")
ax[1].errorbar(
    residuals_mean[idx], y, xerr=err[idx], marker="o", ms=5, mew=4, ls="none", alpha=0.8
)
ax[1].set(xlabel="Residuals", ylabel="State", title="Residuals with 90% CI")
ax[1].set_yticks(y)
ax[1].set_yticklabels(dset.Loc.values[idx], fontsize=10);
```

The plot on the left shows the mean predictions with 90% CI for each of the states using Model 3. The gray markers indicate the actual observed divorce rates. The right plot shows the residuals for each of the states, and both these plots are sorted by the residuals, i.e. at the bottom, we are looking at states where the model predictions are higher than the observed rates, whereas at the top, the reverse is true.

Overall, the model fit seems good because most observed data points lie within a 90% CI around the mean predictions. However, notice how the model over-predicts by a large margin for states like Idaho (bottom left), and on the other end under-predicts for states like Maine (top right). This is likely indicative of other factors that we are missing in our model that affect divorce rate across different states. Even ignoring other socio-political variables, one such factor that we have not yet modeled is the measurement noise given by `Divorce SE` in the dataset. We will explore this in the next section.

## Regression Model with Measurement Error

Note that in our previous models, each data point influences the regression line equally. Is this well justified? We will build on the previous model to incorporate measurement error given by the `Divorce SE` variable in the dataset. Incorporating measurement noise will be useful in ensuring that observations that have higher confidence (i.e. lower measurement noise) have a greater impact on the regression line. On the other hand, this will also help us better model outliers with high measurement errors. For more details on modeling errors due to measurement noise, refer to Chapter 14 of [[1](#References)].

To do this, we will reuse Model 3, with the only change that the final observed value has a measurement error given by `divorce_sd` (notice that this has to be standardized since the `divorce` variable itself has been standardized to mean 0 and std 1).
```
def model_se(marriage, age, divorce_sd, divorce=None):
    a = numpyro.sample("a", dist.Normal(0.0, 0.2))
    bM = numpyro.sample("bM", dist.Normal(0.0, 0.5))
    M = bM * marriage
    bA = numpyro.sample("bA", dist.Normal(0.0, 0.5))
    A = bA * age
    sigma = numpyro.sample("sigma", dist.Exponential(1.0))
    mu = a + M + A
    divorce_rate = numpyro.sample("divorce_rate", dist.Normal(mu, sigma))
    numpyro.sample("obs", dist.Normal(divorce_rate, divorce_sd), obs=divorce)


# Standardize
dset["DivorceScaledSD"] = dset["Divorce SE"] / jnp.std(dset.Divorce.values)

rng_key, rng_key_ = random.split(rng_key)

kernel = NUTS(model_se, target_accept_prob=0.9)
mcmc = MCMC(kernel, num_warmup=1000, num_samples=3000)
mcmc.run(
    rng_key_,
    marriage=dset.MarriageScaled.values,
    age=dset.AgeScaled.values,
    divorce_sd=dset.DivorceScaledSD.values,
    divorce=dset.DivorceScaled.values,
)
mcmc.print_summary()
samples_4 = mcmc.get_samples()
```

### Effect of Incorporating Measurement Noise on Residuals

Notice that our values for the regression coefficients are very similar to Model 3. However, introducing measurement noise allows us to more closely match our predictive distribution to the observed values. We can see this if we plot the residuals as earlier.

```
rng_key, rng_key_ = random.split(rng_key)
predictions_4 = Predictive(model_se, samples_4)(
    rng_key_,
    marriage=dset.MarriageScaled.values,
    age=dset.AgeScaled.values,
    divorce_sd=dset.DivorceScaledSD.values,
)["obs"]

sd = dset.DivorceScaledSD.values
residuals_4 = dset.DivorceScaled.values - predictions_4
residuals_mean = jnp.mean(residuals_4, axis=0)
residuals_hpdi = hpdi(residuals_4, 0.9)
err = residuals_hpdi[1] - residuals_mean
idx = jnp.argsort(residuals_mean)
y = jnp.arange(50)
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(6, 16))

# Plot Residuals
ax.plot(jnp.zeros(50), y, "--")
ax.errorbar(
    residuals_mean[idx], y, xerr=err[idx], marker="o", ms=5, mew=4, ls="none", alpha=0.8
)

# Plot SD
ax.errorbar(residuals_mean[idx], y, xerr=sd[idx], ls="none", color="orange", alpha=0.9)

# Plot earlier mean residual
ax.plot(
    jnp.mean(dset.DivorceScaled.values - predictions_3, 0)[idx],
    y,
    ls="none",
    marker="o",
    ms=6,
    color="black",
    alpha=0.6,
)

ax.set(xlabel="Residuals", ylabel="State", title="Residuals with 90% CI")
ax.set_yticks(y)
ax.set_yticklabels(dset.Loc.values[idx], fontsize=10)
ax.text(
    -2.8,
    -7,
    "Residuals (with error-bars) from current model (in red). "
    "Black marker \nshows residuals from the previous model (Model 3). "
    "Measurement \nerror is indicated by orange bar.",
);
```

The plot above shows the residuals for each of the states, along with the measurement noise given by the orange error bar. The black markers are the mean residuals from our earlier Model 3. Notice how having an additional degree of freedom to model the measurement noise has shrunk the residuals. In particular, for Idaho and Maine, our predictions are now much closer to the observed values after incorporating measurement noise in the model.

To better see how measurement noise affects the movement of the regression line, let us plot the residuals with respect to the measurement noise.
```
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 6))
x = dset.DivorceScaledSD.values
y1 = jnp.mean(residuals_3, 0)
y2 = jnp.mean(residuals_4, 0)
ax.plot(x, y1, ls="none", marker="o")
ax.plot(x, y2, ls="none", marker="o")
for i, (j, k) in enumerate(zip(y1, y2)):
    ax.plot([x[i], x[i]], [j, k], "--", color="gray")

ax.set(
    xlabel="Measurement Noise",
    ylabel="Residual",
    title="Mean residuals (Model 4: red, Model 3: blue)",
);
```

The plot above shows what has happened in more detail: the regression line itself has moved to ensure a better fit for observations with low measurement noise (left of the plot), where the residuals have shrunk very close to 0. That is to say that data points with low measurement error have a concomitantly higher contribution in determining the regression line. On the other hand, for states with high measurement error (right of the plot), incorporating measurement noise allows us to move our posterior distribution mass closer to the observations, resulting in a shrinkage of residuals as well.

## References

1. McElreath, R. (2016). Statistical Rethinking: A Bayesian Course with Examples in R and Stan. CRC Press.
2. Stan Development Team. [Stan User's Guide](https://mc-stan.org/docs/2_19/stan-users-guide/index.html)
3. Goodman, N.D., and Stuhlmueller, A. (2014). [The Design and Implementation of Probabilistic Programming Languages](http://dippl.org/)
4. Pyro Development Team. [Poutine: A Guide to Programming with Effect Handlers in Pyro](http://pyro.ai/examples/effect_handlers.html)
5. Hoffman, M.D., and Gelman, A. (2011). The No-U-Turn Sampler: Adaptively Setting Path Lengths in Hamiltonian Monte Carlo.
6. Betancourt, M. (2017). A Conceptual Introduction to Hamiltonian Monte Carlo.
7. JAX Development Team (2018). [Composable transformations of Python+NumPy programs: differentiate, vectorize, JIT to GPU/TPU, and more](https://github.com/google/jax)
8. Gelman, A., Hwang, J., and Vehtari, A. [Understanding predictive information criteria for Bayesian models](https://arxiv.org/pdf/1307.5928.pdf)
github_jupyter
# Customizing datasets in fastai

```
from fastai import *
from fastai.gen_doc.nbdoc import *
from fastai.vision import *
```

In this tutorial, we'll see how to create custom subclasses of [`ItemBase`](/core.html#ItemBase) or [`ItemList`](/data_block.html#ItemList) while retaining everything the fastai library has to offer. To allow basic functions to work consistently across various applications, the fastai library delegates several tasks to one of those specific objects, and we'll see here which methods you have to implement to be able to have everything work properly. But first, let's take a step back to see where you'll use your end result.

## Links with the data block API

The data block API works by allowing you to pick a class that is responsible for getting your items and another class that is charged with getting your targets. Combined together, they create a pytorch [`Dataset`](https://pytorch.org/docs/stable/data.html#torch.utils.data.Dataset) that is then wrapped inside a [`DataLoader`](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader). The training set, validation set and maybe test set are then all put in a [`DataBunch`](/basic_data.html#DataBunch).

The data block API allows you to mix and match what class your inputs have, what class your targets have, how to do the split between train and validation set, then how to create the [`DataBunch`](/basic_data.html#DataBunch), but if you have a very specific kind of input/target, the fastai classes might not be sufficient for you. This tutorial is there to explain what is needed to create a new class of items and what methods are important to implement or override.

It goes in two phases: first we focus on what you need to create a custom [`ItemBase`](/core.html#ItemBase) class (which is the type of your inputs/targets), then on how to create your custom [`ItemList`](/data_block.html#ItemList) (which is basically a set of [`ItemBase`](/core.html#ItemBase)) while highlighting which methods are called by the library.

## Creating a custom [`ItemBase`](/core.html#ItemBase) subclass

The fastai library contains three basic types of [`ItemBase`](/core.html#ItemBase) that you might want to subclass:
- [`Image`](/vision.image.html#Image) for vision applications
- [`Text`](/text.data.html#Text) for text applications
- [`TabularLine`](/tabular.data.html#TabularLine) for tabular applications

Whether you decide to create your own item class or to subclass one of the above, here is what you need to implement:

### Basic attributes

These are the most important attributes your custom [`ItemBase`](/core.html#ItemBase) needs, as they're used everywhere in the fastai library:
- `ItemBase.data` is the thing that is passed to pytorch when you want to create a [`DataLoader`](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader). This is what needs to be fed to your model. Note that it might be different from the representation of your item since you might want something that is more understandable.
- `ItemBase.obj` is the thing that truly represents the underlying object behind your item. It should be sufficient to create a copy of your item. For instance, when creating the test set, the basic label is the `obj` attribute of the first label (or y) in the training set.
- `__str__` representation: if applicable, this is what will be displayed when the fastai library has to show your item.
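As a minimal, hedged sketch (the class name and the wrapped value below are hypothetical and not part of the fastai API), an item exposing these three attributes could look like this:

``` python
class ScalarItem(ItemBase):
    "Hypothetical item wrapping a single float value."
    def __init__(self, value):
        self.obj = value            # human-readable underlying object
        self.data = tensor(value)   # what gets collated and fed to the model
    def __str__(self):
        return f'{self.obj:.2f}'
```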
If we take the example of a [`MultiCategory`](/core.html#MultiCategory) object `o` for instance:
- `o.obj` is the list of tags that object has
- `o.data` is a tensor where the tags are one-hot encoded
- `str(o)` returns the tags separated by ;

If you want to code the way data augmentation should be applied to your custom `Item`, you should write an `apply_tfms` method. This is what will be called if you apply a [`transform`](/vision.transform.html#vision.transform) block in the data block API.

### Advanced show methods

If you want to use methods such as `data.show_batch()` or `learn.show_results()` with a brand new kind of [`ItemBase`](/core.html#ItemBase) you will need to implement two other methods. In both cases, the generic function will grab the tensors of inputs, targets and predictions (if applicable), reconstruct the corresponding [`ItemBase`](/core.html#ItemBase) (see below) but it will delegate to the [`ItemBase`](/core.html#ItemBase) the way to display the results.

``` python
def show_xys(self, xs, ys, **kwargs)->None:

def show_xyzs(self, xs, ys, zs, **kwargs)->None:
```

In both cases `xs` and `ys` represent the inputs and the targets, in the second case `zs` represent the predictions. They are lists of the same length that depend on the `rows` argument you passed. The kwargs are passed from `data.show_batch()` / `learn.show_results()`. As an example, here is the source code of those methods in [`Image`](/vision.image.html#Image):

``` python
def show_xys(self, xs, ys, figsize:Tuple[int,int]=(9,10), **kwargs):
    "Show the `xs` and `ys` on a figure of `figsize`. `kwargs` are passed to the show method."
    rows = int(math.sqrt(len(xs)))
    fig, axs = plt.subplots(rows,rows,figsize=figsize)
    for i, ax in enumerate(axs.flatten() if rows > 1 else [axs]):
        xs[i].show(ax=ax, y=ys[i], **kwargs)
    plt.tight_layout()

def show_xyzs(self, xs, ys, zs, figsize:Tuple[int,int]=None, **kwargs):
    """Show `xs` (inputs), `ys` (targets) and `zs` (predictions) on a figure of `figsize`.
    `kwargs` are passed to the show method."""
    figsize = ifnone(figsize, (6,3*len(xs)))
    fig,axs = plt.subplots(len(xs), 2, figsize=figsize)
    fig.suptitle('Ground truth / Predictions', weight='bold', size=14)
    for i,(x,y,z) in enumerate(zip(xs,ys,zs)):
        x.show(ax=axs[i,0], y=y, **kwargs)
        x.show(ax=axs[i,1], y=z, **kwargs)
```

### Example: ImageTuple

For cycleGANs, we need to create a custom type of items since we feed the model tuples of images. Let's look at how to code this. The basis is to code the `obj` and [`data`](/vision.data.html#vision.data) attributes. We do that in the init. The object is the tuple of images and the data their underlying tensors normalized between -1 and 1.

```
class ImageTuple(ItemBase):
    def __init__(self, img1, img2):
        self.img1,self.img2 = img1,img2
        self.obj,self.data = (img1,img2),[-1+2*img1.data,-1+2*img2.data]
```

Then we want to apply data augmentation to our tuple of images. That's done by writing an `apply_tfms` method as we saw before. Here we just pass that call to the two underlying images, then update the data.

```
def apply_tfms(self, tfms, **kwargs):
    self.img1 = self.img1.apply_tfms(tfms, **kwargs)
    self.img2 = self.img2.apply_tfms(tfms, **kwargs)
    self.data = [-1+2*self.img1.data,-1+2*self.img2.data]
    return self
```

We define a last method to stack the two images next to each other, which we will use later for a customized `show_batch`/`show_results` behavior.
```
def to_one(self):
    return Image(0.5+torch.cat(self.data,2)/2)
```

This is all you need to create your custom [`ItemBase`](/core.html#ItemBase). You won't be able to use it until you have put it inside your custom [`ItemList`](/data_block.html#ItemList) though, so you should continue reading the next section.

## Creating a custom [`ItemList`](/data_block.html#ItemList) subclass

This is the main class that allows you to group your inputs or your targets in the data block API. You can then use any of the splitting or labelling methods before creating a [`DataBunch`](/basic_data.html#DataBunch). To make sure everything is properly working, here is what you need to know.

### Class variables

Whether you're directly subclassing [`ItemList`](/data_block.html#ItemList) or one of the particular fastai ones, make sure to know the content of the following three variables as you may need to adjust them:
- `_bunch` contains the name of the class that will be used to create a [`DataBunch`](/basic_data.html#DataBunch)
- `_processor` contains a class (or a list of classes) of [`PreProcessor`](/data_block.html#PreProcessor) that will then be used as the default to create a processor for this [`ItemList`](/data_block.html#ItemList)
- `_label_cls` contains the class that will be used to create the labels by default

`_label_cls` is the first to be used in the data block API, in the labelling function. If this variable is set to `None`, the label class will be guessed between [`CategoryList`](/data_block.html#CategoryList), [`MultiCategoryList`](/data_block.html#MultiCategoryList) and [`FloatList`](/data_block.html#FloatList) depending on the type of the first item. The default can be overridden by passing a `label_cls` in the kwargs of the labelling function.

`_processor` is the second to be used. The processors are called at the end of the labelling to apply some kind of function on your items. The default processor of the inputs can be overridden by passing a `processor` in the kwargs when creating the [`ItemList`](/data_block.html#ItemList), the default processor of the targets can be overridden by passing a `processor` in the kwargs of the labelling function.

Processors are useful for pre-processing some data, but you also need to put in their state any variable you want to save for the call of `data.export()` before creating a [`Learner`](/basic_train.html#Learner) object for inference: the state of the [`ItemList`](/data_block.html#ItemList) isn't saved there, only their processors. For instance, `SegmentationProcessor`'s only reason to exist is to save the dataset classes, and during the process call, it doesn't do anything apart from setting the `classes` and `c` attributes to its dataset.

``` python
class SegmentationProcessor(PreProcessor):
    def __init__(self, ds:ItemList): self.classes = ds.classes
    def process(self, ds:ItemList):  ds.classes,ds.c = self.classes,len(self.classes)
```

`_bunch` is the last class variable used in the data block. When you type the final `databunch()`, the data block API calls the `_bunch.create` method with the `_bunch` of the inputs.

### Keeping \_\_init\_\_ arguments

If you pass additional arguments in your `__init__` call that you save in the state of your [`ItemList`](/data_block.html#ItemList), be wary to also pass them along in the `new` method as this one is used to create your training and validation set when splitting.
The basic scheme is:

``` python
class MyCustomItemList(ItemList):
    def __init__(self, items, my_arg, **kwargs):
        self.my_arg = my_arg
        super().__init__(items, **kwargs)

    def new(self, items, **kwargs):
        return super().new(items, self.my_arg, **kwargs)
```

Be sure to keep the kwargs as is, as they contain all the additional stuff you can pass to an [`ItemList`](/data_block.html#ItemList).

### Important methods

#### - get

The most important method you have to implement is `get`: this one tells your custom [`ItemList`](/data_block.html#ItemList) how to generate an [`ItemBase`](/core.html#ItemBase) from the thing stored in its `items` array. For instance an [`ImageItemList`](/vision.data.html#ImageItemList) has the following `get` method:

``` python
def get(self, i):
    fn = super().get(i)
    res = self.open(fn)
    self.sizes[i] = res.size
    return res
```

The first line basically looks at `self.items[i]` (which is a filename). The second line opens it, since the `open` method is just

``` python
def open(self, fn): return open_image(fn)
```

The third line is there for [`ImagePoints`](/vision.image.html#ImagePoints) or [`ImageBBox`](/vision.image.html#ImageBBox) targets that require the size of the input [`Image`](/vision.image.html#Image) to be created. Note that if you are building a custom target class and you need the size of an image, you should call `self.x.size[i]`.

```
jekyll_note("""If you just want to customize the way an `Image` is opened, subclass `Image` and just change the `open` method.""")
```

#### - reconstruct

This is the method that is called in `data.show_batch()`, `learn.predict()` or `learn.show_results()` to transform a pytorch tensor back into an [`ItemBase`](/core.html#ItemBase). In a way, it does the opposite of calling `ItemBase.data`. It should take a tensor `t` and return the same kind of thing as the `get` method.

In some situations ([`ImagePoints`](/vision.image.html#ImagePoints), [`ImageBBox`](/vision.image.html#ImageBBox) for instance) you need to have a look at the corresponding input to rebuild your item. In this case, you should have a second argument called `x` (don't change that name). For instance, here is the `reconstruct` method of [`PointsItemList`](/vision.data.html#PointsItemList):

```python
def reconstruct(self, t, x): return ImagePoints(FlowField(x.size, t), scale=False)
```

#### - analyze_pred

This is the method that is called in `learn.predict()` or `learn.show_results()` to transform predictions into an output tensor suitable for `reconstruct`. For instance we may need to take the maximum argument (for [`Category`](/core.html#Category)) or the predictions greater than a certain threshold (for [`MultiCategory`](/core.html#MultiCategory)). It should take a tensor, along with optional kwargs, and return a tensor.

For instance, here is the `analyze_pred` method of [`MultiCategoryList`](/data_block.html#MultiCategoryList):

```python
def analyze_pred(self, pred, thresh:float=0.5): return (pred >= thresh).float()
```

`thresh` can then be passed as kwarg during the calls to `learn.predict()` or `learn.show_results()`.

### Advanced show methods

If you want to use methods such as `data.show_batch()` or `learn.show_results()` with a brand new kind of [`ItemBase`](/core.html#ItemBase) you will need to implement two other methods.
In both cases, the generic function will grab the tensors of inputs, targets and predictions (if applicable), reconstruct the corresponding [`ItemBase`](/core.html#ItemBase) (as seen before) but it will delegate to the [`ItemList`](/data_block.html#ItemList) the way to display the results.

``` python
def show_xys(self, xs, ys, **kwargs)->None:

def show_xyzs(self, xs, ys, zs, **kwargs)->None:
```

In both cases `xs` and `ys` represent the inputs and the targets, in the second case `zs` represent the predictions. They are lists of the same length that depend on the `rows` argument you passed. The kwargs are passed from `data.show_batch()` / `learn.show_results()`. As an example, here is the source code of those methods in [`ImageItemList`](/vision.data.html#ImageItemList):

``` python
def show_xys(self, xs, ys, figsize:Tuple[int,int]=(9,10), **kwargs):
    "Show the `xs` and `ys` on a figure of `figsize`. `kwargs` are passed to the show method."
    rows = int(math.sqrt(len(xs)))
    fig, axs = plt.subplots(rows,rows,figsize=figsize)
    for i, ax in enumerate(axs.flatten() if rows > 1 else [axs]):
        xs[i].show(ax=ax, y=ys[i], **kwargs)
    plt.tight_layout()

def show_xyzs(self, xs, ys, zs, figsize:Tuple[int,int]=None, **kwargs):
    """Show `xs` (inputs), `ys` (targets) and `zs` (predictions) on a figure of `figsize`.
    `kwargs` are passed to the show method."""
    figsize = ifnone(figsize, (6,3*len(xs)))
    fig,axs = plt.subplots(len(xs), 2, figsize=figsize)
    fig.suptitle('Ground truth / Predictions', weight='bold', size=14)
    for i,(x,y,z) in enumerate(zip(xs,ys,zs)):
        x.show(ax=axs[i,0], y=y, **kwargs)
        x.show(ax=axs[i,1], y=z, **kwargs)
```

Linked to this method is the class variable `_show_square` of an [`ItemList`](/data_block.html#ItemList). It defaults to `False` but if it's `True`, the `show_batch` method will send `rows * rows` `xs` and `ys` to `show_xys` (so that it shows a square of inputs/targets), like here for images.

### Example: ImageTupleList

Continuing our custom item example, we create a custom [`ItemList`](/data_block.html#ItemList) class that will wrap those `ImageTuple` objects properly. The first thing is to write a custom `__init__` method (since we need two lists of filenames here), which means we also have to change the `new` method.

```
class ImageTupleList(ImageItemList):
    def __init__(self, items, itemsB=None, **kwargs):
        self.itemsB = itemsB
        super().__init__(items, **kwargs)

    def new(self, items, **kwargs):
        return super().new(items, itemsB=self.itemsB, **kwargs)
```

We then specify how to get one item. Here we pass the image in the first list of items, and pick one randomly in the second list.

```
def get(self, i):
    img1 = super().get(i)
    fn = self.itemsB[random.randint(0, len(self.itemsB)-1)]
    return ImageTuple(img1, open_image(fn))
```

We also add a custom factory method to directly create an `ImageTupleList` from two folders.

```
@classmethod
def from_folders(cls, path, folderA, folderB, **kwargs):
    itemsB = ImageItemList.from_folder(path/folderB).items
    res = super().from_folder(path/folderA, itemsB=itemsB, **kwargs)
    res.path = path
    return res
```

Finally, we have to specify how to reconstruct the `ImageTuple` from tensors if we want `show_batch` to work. We recreate the images and denormalize.

```
def reconstruct(self, t:Tensor):
    return ImageTuple(Image(t[0]/2+0.5),Image(t[1]/2+0.5))
```

There is no need to write an `analyze_pred` method since the default behavior (returning the output tensor) is what we need here.
However `show_results` won't work properly unless the target (which we don't really care about here) has the right `reconstruct` method: the fastai library uses the `reconstruct` method of the target on the outputs. That's why we create another custom [`ItemList`](/data_block.html#ItemList) with just that `reconstruct` method. The first line is to reconstruct our dummy targets, and the second one is the same as in `ImageTupleList`. ``` class TargetTupleList(ItemList): def reconstruct(self, t:Tensor): if len(t.size()) == 0: return t return ImageTuple(Image(t[0]/2+0.5),Image(t[1]/2+0.5)) ``` To make sure our `ImageTupleList` uses that for labelling, we pass it in `_label_cls` and this is what the result looks like. ``` class ImageTupleList(ImageItemList): _label_cls=TargetTupleList def __init__(self, items, itemsB=None, **kwargs): self.itemsB = itemsB super().__init__(items, **kwargs) def new(self, items, **kwargs): return super().new(items, itemsB=self.itemsB, **kwargs) def get(self, i): img1 = super().get(i) fn = self.itemsB[random.randint(0, len(self.itemsB)-1)] return ImageTuple(img1, open_image(fn)) def reconstruct(self, t:Tensor): return ImageTuple(Image(t[0]/2+0.5),Image(t[1]/2+0.5)) @classmethod def from_folders(cls, path, folderA, folderB, **kwargs): itemsB = ImageItemList.from_folder(path/folderB).items res = super().from_folder(path/folderA, itemsB=itemsB, **kwargs) res.path = path return res ``` Lastly, we want to customize the behavior of `show_batch` and `show_results`. Remember the `to_one` method just puts the two images next to each other. ``` def show_xys(self, xs, ys, figsize:Tuple[int,int]=(12,6), **kwargs): "Show the `xs` and `ys` on a figure of `figsize`. `kwargs` are passed to the show method." rows = int(math.sqrt(len(xs))) fig, axs = plt.subplots(rows,rows,figsize=figsize) for i, ax in enumerate(axs.flatten() if rows > 1 else [axs]): xs[i].to_one().show(ax=ax, **kwargs) plt.tight_layout() def show_xyzs(self, xs, ys, zs, figsize:Tuple[int,int]=None, **kwargs): """Show `xs` (inputs), `ys` (targets) and `zs` (predictions) on a figure of `figsize`. `kwargs` are passed to the show method.""" figsize = ifnone(figsize, (12,3*len(xs))) fig,axs = plt.subplots(len(xs), 2, figsize=figsize) fig.suptitle('Ground truth / Predictions', weight='bold', size=14) for i,(x,z) in enumerate(zip(xs,zs)): x.to_one().show(ax=axs[i,0], **kwargs) z.to_one().show(ax=axs[i,1], **kwargs) ```
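To sanity-check the pieces above outside of the data block API, here is a hedged usage sketch (the image paths are hypothetical placeholders, not files from this tutorial): it builds an `ImageTuple` directly from two images and displays the stacked result with `to_one`.

``` python
# Open two images (paths are placeholders), wrap them in our custom item,
# and show them stacked side by side.
img_a = open_image('data/trainA/img_001.jpg')
img_b = open_image('data/trainB/img_001.jpg')

tup = ImageTuple(img_a, img_b)
tup.to_one().show(figsize=(8, 4))
```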
github_jupyter
# Creating a Real-Time Inferencing Service You've spent a lot of time in this course training and registering machine learning models. Now it's time to deploy a model as a real-time service that clients can use to get predictions from new data. ## Connect to Your Workspace The first thing you need to do is to connect to your workspace using the Azure ML SDK. > **Note**: If the authenticated session with your Azure subscription has expired since you completed the previous exercise, you'll be prompted to reauthenticate. ``` import azureml.core from azureml.core import Workspace # Load the workspace from the saved config file ws = Workspace.from_config() print('Ready to use Azure ML {} to work with {}'.format(azureml.core.VERSION, ws.name)) ``` ## Deploy a Model as a Web Service You have trained and registered a machine learning model that classifies patients based on the likelihood of them having diabetes. This model could be used in a production environment such as a doctor's surgery where only patients deemed to be at risk need to be subjected to a clinical test for diabetes. To support this scenario, you will deploy the model as a web service. First, let's determine what models you have registered in the workspace. ``` from azureml.core import Model for model in Model.list(ws): print(model.name, 'version:', model.version) for tag_name in model.tags: tag = model.tags[tag_name] print ('\t',tag_name, ':', tag) for prop_name in model.properties: prop = model.properties[prop_name] print ('\t',prop_name, ':', prop) print('\n') ``` Right, now let's get the model that we want to deploy. By default, if we specify a model name, the latest version will be returned. ``` model = ws.models['diabetes_model'] print(model.name, 'version', model.version) ``` We're going to create a web service to host this model, and this will require some code and configuration files; so let's create a folder for those. ``` import os folder_name = 'diabetes_service' # Create a folder for the web service files experiment_folder = './' + folder_name os.makedirs(folder_name, exist_ok=True) print(folder_name, 'folder created.') ``` The web service where we deploy the model will need some Python code to load the input data, get the model from the workspace, and generate and return predictions. We'll save this code in an *entry script* that will be deployed to the web service: ``` %%writefile $folder_name/score_diabetes.py import json import joblib import numpy as np from azureml.core.model import Model # Called when the service is loaded def init(): global model # Get the path to the deployed model file and load it model_path = Model.get_model_path('diabetes_model') model = joblib.load(model_path) # Called when a request is received def run(raw_data): # Get the input data as a numpy array data = np.array(json.loads(raw_data)['data']) # Get a prediction from the model predictions = model.predict(data) # Get the corresponding classname for each prediction (0 or 1) classnames = ['not-diabetic', 'diabetic'] predicted_classes = [] for prediction in predictions: predicted_classes.append(classnames[prediction]) # Return the predictions as JSON return json.dumps(predicted_classes) ``` The web service will be hosted in a container, and the container will need to install any required Python dependencies when it gets initialized. In this case, our scoring code requires **scikit-learn**, so we'll create a .yml file that tells the container host to install this into the environment. 
```
from azureml.core.conda_dependencies import CondaDependencies

# Add the dependencies for our model (AzureML defaults are already included)
myenv = CondaDependencies()
myenv.add_conda_package('scikit-learn')

# Save the environment config as a .yml file
env_file = folder_name + "/diabetes_env.yml"
with open(env_file,"w") as f:
    f.write(myenv.serialize_to_string())
print("Saved dependency info in", env_file)

# Print the .yml file
with open(env_file,"r") as f:
    print(f.read())
```

Now you're ready to deploy. We'll deploy the container as a service named **diabetes-service**. The deployment process includes the following steps:

1. Define an inference configuration, which includes the scoring and environment files required to load and use the model.
2. Define a deployment configuration that defines the execution environment in which the service will be hosted. In this case, an Azure Container Instance.
3. Deploy the model as a web service.
4. Verify the status of the deployed service.

> **More Information**: For more details about model deployment, and options for target execution environments, see the [documentation](https://docs.microsoft.com/azure/machine-learning/how-to-deploy-and-where).

Deployment will take some time as it first runs a process to create a container image, and then runs a process to create a web service based on the image. When deployment has completed successfully, you'll see a status of **Healthy**.

```
from azureml.core.webservice import AciWebservice
from azureml.core.model import InferenceConfig

# Configure the scoring environment
inference_config = InferenceConfig(runtime= "python",
                                   source_directory = folder_name,
                                   entry_script="score_diabetes.py",
                                   conda_file="diabetes_env.yml")

deployment_config = AciWebservice.deploy_configuration(cpu_cores = 1, memory_gb = 1)

service_name = "diabetes-service"

service = Model.deploy(ws, service_name, [model], inference_config, deployment_config)

service.wait_for_deployment(True)
print(service.state)
```

Hopefully, the deployment has been successful and you can see a status of **Healthy**. If not, you can use the following code to check the status and get the service logs to help you troubleshoot.

```
print(service.state)
print(service.get_logs())

# If you need to make a change and redeploy, you may need to delete the unhealthy service using the following code:
#service.delete()
```

Take a look at your workspace in [Azure ML Studio](https://ml.azure.com) and view the **Endpoints** page, which shows the deployed services in your workspace.

You can also retrieve the names of web services in your workspace by running the following code:

```
for webservice_name in ws.webservices:
    print(webservice_name)
```

## Use the Web Service

With the service deployed, now you can consume it from a client application.

```
import json

x_new = [[2,180,74,24,21,23.9091702,1.488172308,22]]
print ('Patient: {}'.format(x_new[0]))

# Convert the array to a serializable list in a JSON document
input_json = json.dumps({"data": x_new})

# Call the web service, passing the input data (the web service will also accept the data in binary format)
predictions = service.run(input_data = input_json)

# Get the predicted class - it'll be the first (and only) one.
predicted_classes = json.loads(predictions)
print(predicted_classes[0])
```

You can also send multiple patient observations to the service, and get back a prediction for each one.
```
import json

# This time our input is an array of two feature arrays
x_new = [[2,180,74,24,21,23.9091702,1.488172308,22],
         [0,148,58,11,179,39.19207553,0.160829008,45]]

# Convert the array or arrays to a serializable list in a JSON document
input_json = json.dumps({"data": x_new})

# Call the web service, passing the input data
predictions = service.run(input_data = input_json)

# Get the predicted classes.
predicted_classes = json.loads(predictions)

for i in range(len(x_new)):
    print ("Patient {}".format(x_new[i]), predicted_classes[i] )
```

The code above uses the Azure ML SDK to connect to the containerized web service and use it to generate predictions from your diabetes classification model. In production, a model is likely to be consumed by business applications that do not use the Azure ML SDK, but simply make HTTP requests to the web service.

Let's determine the URL to which these applications must submit their requests:

```
endpoint = service.scoring_uri
print(endpoint)
```

Now that you know the endpoint URI, an application can simply make an HTTP request, sending the patient data in JSON (or binary) format, and receive back the predicted class(es).

```
import requests
import json

x_new = [[2,180,74,24,21,23.9091702,1.488172308,22],
         [0,148,58,11,179,39.19207553,0.160829008,45]]

# Convert the array to a serializable list in a JSON document
input_json = json.dumps({"data": x_new})

# Set the content type
headers = { 'Content-Type':'application/json' }

predictions = requests.post(endpoint, input_json, headers = headers)
predicted_classes = json.loads(predictions.json())

for i in range(len(x_new)):
    print ("Patient {}".format(x_new[i]), predicted_classes[i] )
```

You've deployed your web service as an Azure Container Instance (ACI) service that requires no authentication. This is fine for development and testing, but for production you should consider deploying to an Azure Kubernetes Service (AKS) cluster and enabling authentication. This would require REST requests to include an **Authorization** header.

## Delete the Service

When you no longer need your service, you should delete it to avoid incurring unnecessary charges.

```
service.delete()
print ('Service deleted.')
```

For more information about publishing a model as a service, see the [documentation](https://docs.microsoft.com/azure/machine-learning/how-to-deploy-and-where)
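As noted above, an authenticated deployment (for example on AKS) expects an **Authorization** header on each REST request. The snippet below is a hedged illustration only: it assumes key-based authentication and that the deployed service object exposes `get_keys()`; for the anonymous ACI service in this notebook no header is needed.

```
import requests
import json

# Assumed: key-based auth on an AKS deployment; get_keys() returns a primary and secondary key.
primary_key, secondary_key = service.get_keys()

headers = {
    'Content-Type': 'application/json',
    'Authorization': 'Bearer ' + primary_key
}

input_json = json.dumps({"data": [[2,180,74,24,21,23.9091702,1.488172308,22]]})
response = requests.post(endpoint, input_json, headers=headers)
print(json.loads(response.json()))
```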
github_jupyter
<a href="https://colab.research.google.com/github/bhuiyanmobasshir94/Cow-weight-and-Breed-Prediction/blob/main/notebooks/031_dec.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` import numpy as np import pandas as pd import sys import os import PIL import PIL.Image import tensorflow as tf import tensorflow.keras as keras from tensorflow.keras import layers import tensorflow_datasets as tfds import pathlib from sklearn.model_selection import train_test_split from sklearn import preprocessing ## Globals YT_IMAGE_TO_TAKE = 4 images_dataset_url = "https://cv-datasets-2021.s3.amazonaws.com/images.tar.gz" images_data_dir = tf.keras.utils.get_file(origin=images_dataset_url, fname='images', untar=True) images_data_dir = pathlib.Path(images_data_dir) yt_images_dataset_url = "https://cv-datasets-2021.s3.amazonaws.com/yt_images.tar.gz" yt_images_data_dir = tf.keras.utils.get_file(origin=yt_images_dataset_url, fname='yt_images', untar=True) yt_images_data_dir = pathlib.Path(yt_images_data_dir) if sys.platform == 'darwin': os.system(f"dot_clean {images_data_dir}") os.system(f"dot_clean {yt_images_data_dir}") elif sys.platform.startswith("lin"): os.system(f"cd {images_data_dir} && find . -type f -name '._*' -delete") os.system(f"cd {yt_images_data_dir} && find . -type f -name '._*' -delete") image_count = len(list(images_data_dir.glob('*/*.jpg'))) print(image_count) yt_image_count = len(list(yt_images_data_dir.glob('*/*.jpg'))) print(yt_image_count) df = pd.read_csv("https://cv-datasets-2021.s3.amazonaws.com/dataset.csv") df.shape df.columns df.head(2) images = list(images_data_dir.glob('*/*.jpg')) yt_images = list(yt_images_data_dir.glob('*/*.jpg')) min_height = 0 max_height = 0 min_width = 0 max_width = 0 for i, image in enumerate(images): w, h = PIL.Image.open(str(image)).size if i == 0: min_height = h max_height = h min_width = w max_width = w if h <= min_height: min_height = h if h >= max_height: max_height = h if w <= min_width: min_width = w if w >= max_width: max_width = w print(f"min_height: {min_height}") print(f"min_width: {min_width}") print(f"max_height: {max_height}") print(f"max_width: {max_width}") min_height = 0 max_height = 0 min_width = 0 max_width = 0 for i, image in enumerate(yt_images): w, h = PIL.Image.open(str(image)).size if i == 0: min_height = h max_height = h min_width = w max_width = w if h <= min_height: min_height = h if h >= max_height: max_height = h if w <= min_width: min_width = w if w >= max_width: max_width = w print(f"min_height: {min_height}") print(f"min_width: {min_width}") print(f"max_height: {max_height}") print(f"max_width: {max_width}") f_df = pd.DataFrame(columns = ['file_path', 'teeth', 'age_in_year', 'breed', 'height_in_inch', 'weight_in_kg']) for index, row in df.iterrows(): images = list(images_data_dir.glob(f"{row['sku']}/*.jpg")) yt_images = list(yt_images_data_dir.glob(f"{row['sku']}/*.jpg")) for image in images: f_df = f_df.append({'file_path' : image, 'teeth' : row['teeth'], 'age_in_year' : row['age_in_year'], 'breed': row['breed'], 'height_in_inch': row['height_in_inch'], 'weight_in_kg': row['weight_in_kg']}, ignore_index = True) for idx, image in enumerate(yt_images): if idx == (YT_IMAGE_TO_TAKE - 1): break f_df = f_df.append({'file_path' : image, 'teeth' : row['teeth'], 'age_in_year' : row['age_in_year'], 'breed': row['breed'], 'height_in_inch': row['height_in_inch'], 'weight_in_kg': row['weight_in_kg']}, ignore_index = True) f_df.shape f_df.head(1) def label_encode(df): 
teeth_le = preprocessing.LabelEncoder() df['teeth']= teeth_le.fit_transform(df['teeth']) breed_le = preprocessing.LabelEncoder() df['breed']= breed_le.fit_transform(df['breed']) age_in_year_le = preprocessing.LabelEncoder() df['age_in_year']= age_in_year_le.fit_transform(df['age_in_year']) print(teeth_le.classes_) print(breed_le.classes_) print(age_in_year_le.classes_) return df def inverse_transform(le, series=[]): return le.inverse_transform(series) f_df = label_encode(f_df) # train_df, valid_test_df = train_test_split(f_df, test_size=0.3) # validation_df, test_df = train_test_split(valid_test_df, test_size=0.3) # print(f"train_df: {train_df.shape}") # print(f"validation_df: {validation_df.shape}") # print(f"test_df: {test_df.shape}") train_df, test_df = train_test_split(f_df, test_size=0.1) print(f"train_df: {train_df.shape}") print(f"test_df: {test_df.shape}") # min_height: 450 # min_width: 800 # input: [image, teeth] # outpur: [age_in_year, breed, height_in_inch, weight_in_kg] # class CustomDataGen(tf.keras.utils.Sequence): # def __init__(self, df, X_col, y_col, # batch_size, # input_size=(450, 800, 3), # (input_height, input_width, input_channel) # shuffle=True): # self.df = df.copy() # self.X_col = X_col # self.y_col = y_col # self.batch_size = batch_size # self.input_size = input_size # self.shuffle = shuffle # self.n = len(self.df) # # self.n_teeth = df[X_col['teeth']].max() # # self.n_breed = df[y_col['breed']].nunique() # def on_epoch_end(self): # if self.shuffle: # self.df = self.df.sample(frac=1).reset_index(drop=True) # def __get_input(self, path, target_size): # image = tf.keras.preprocessing.image.load_img(path) # image_arr = tf.keras.preprocessing.image.img_to_array(image) # # image_arr = image_arr[ymin:ymin+h, xmin:xmin+w] # image_arr = tf.image.resize(image_arr,(target_size[0], target_size[1])).numpy() # return image_arr/255. 
# def __get_output(self, label, num_classes): # return tf.keras.utils.to_categorical(label, num_classes=num_classes) # def __get_data(self, batches): # # Generates data containing batch_size samples # path_batch = batches[self.X_col['file_path']] # # teeth_batch = batches[self.X_col['teeth']] # # breed_batch = batches[self.y_col['breed']] # weight_in_kg_batch = batches[self.y_col['weight_in_kg']] # height_in_inch_batch = batches[self.y_col['height_in_inch']] # age_in_year_batch = batches[self.y_col['age_in_year']] # X0 = np.asarray([self.__get_input(x, self.input_size) for x in path_batch]) # # y0_batch = np.asarray([self.__get_output(y, self.n_teeth) for y in teeth_batch]) # # y1_batch = np.asarray([self.__get_output(y, self.n_breed) for y in breed_batch]) # y0 = np.asarray([tf.cast(y, tf.float32) for y in weight_in_kg_batch]) # y1 = np.asarray([tf.cast(y, tf.float32) for y in height_in_inch_batch]) # y2 = np.asarray([tf.cast(y, tf.float32) for y in age_in_year_batch]) # return X0, tuple([y0, y1, y2]) # def __getitem__(self, index): # batches = self.df[index * self.batch_size:(index + 1) * self.batch_size] # X, y = self.__get_data(batches) # return X, y # def __len__(self): # return self.n // self.batch_size # traingen = CustomDataGen(train_df, # X_col={'file_path':'file_path', 'teeth': 'teeth'}, # y_col={'breed': 'breed', 'weight_in_kg': 'weight_in_kg', 'height_in_inch': 'height_in_inch', 'age_in_year': 'age_in_year'}, # batch_size=128, input_size=(450, 800, 3)) # testgen = CustomDataGen(test_df, # X_col={'file_path':'file_path', 'teeth': 'teeth'}, # y_col={'breed': 'breed', 'weight_in_kg': 'weight_in_kg', 'height_in_inch': 'height_in_inch', 'age_in_year': 'age_in_year'}, # batch_size=128, input_size=(450, 800, 3)) # validgen = CustomDataGen(validation_df, # X_col={'file_path':'file_path', 'teeth': 'teeth'}, # y_col={'breed': 'breed', 'weight_in_kg': 'weight_in_kg', 'height_in_inch': 'height_in_inch', 'age_in_year': 'age_in_year'}, # batch_size=128, input_size=(450, 800, 3)) def __get_input(path, target_size): image = tf.keras.preprocessing.image.load_img(path) image_arr = tf.keras.preprocessing.image.img_to_array(image) image_arr = tf.image.resize(image_arr,(target_size[0], target_size[1])).numpy() return image_arr/255. 
def data_loader(df, image_size=(450, 800, 3)): y0 = tf.cast(df.weight_in_kg, tf.float32) print(y0.shape) y1 = tf.cast(df.height_in_inch, tf.float32) print(y1.shape) # y2 = tf.cast(df.age_in_year, tf.float32) y2 = keras.utils.to_categorical(df.age_in_year) print(y2.shape) y3 = keras.utils.to_categorical(df.breed) print(y3.shape) path_batch = df.file_path X0 = tf.cast([__get_input(x, image_size) for x in path_batch], tf.float32) print(X0.shape) X1 = keras.utils.to_categorical(df.teeth) print(X1.shape) return (X0, X1), (y0, y1, y2, y3) (X0, X1), (y0, y1, y2, y3) = data_loader(train_df, (150, 150, 3)) # input = keras.Input(shape=(128, 128, 3), name="original_img") # x = layers.Conv2D(64, 3, activation="relu")(input) # x = layers.Conv2D(128, 3, activation="relu")(x) # x = layers.MaxPooling2D(3)(x) # x = layers.Conv2D(128, 3, activation="relu")(x) # x = layers.Conv2D(64, 3, activation="relu")(x) # x = layers.GlobalMaxPooling2D()(x) input0 = keras.Input(shape=(150, 150, 3), name="img") x = layers.Conv2D(32, 3, activation="relu")(input0) x = layers.MaxPooling2D(2)(x) x = layers.Conv2D(32, 3, activation="relu")(x) x = layers.MaxPooling2D(2)(x) x = layers.Conv2D(64, 3, activation="relu")(x) x = layers.GlobalMaxPooling2D()(x) # input1 = keras.Input(shape=(3,), name="teeth") out_a = keras.layers.Dense(1, activation='linear', name='wt_rg')(x) out_b = keras.layers.Dense(1, activation='linear', name='ht_rg')(x) # out_c = keras.layers.Dense(1, activation='linear', name='ag_rg')(x) out_c = keras.layers.Dense(3, activation='softmax', name='ag_3cls')(x) out_d = keras.layers.Dense(8, activation='softmax', name='brd_8cls')(x) encoder = keras.Model( inputs = input0 , outputs = [out_a, out_b, out_c, out_d], name="encoder") encoder.compile( loss = { "wt_rg": tf.keras.losses.MeanSquaredError(), "ht_rg": tf.keras.losses.MeanSquaredError(), # "ag_rg": tf.keras.losses.MeanSquaredError() "ag_3cls": tf.keras.losses.CategoricalCrossentropy(), "brd_8cls": tf.keras.losses.CategoricalCrossentropy() }, metrics = { "wt_rg": 'mse', "ht_rg": 'mse', "ag_3cls": 'accuracy', "brd_8cls": 'accuracy' }, optimizer = tf.keras.optimizers.Adam(learning_rate=0.0001) ) encoder.fit(X0, [y0, y1, y2, y3], epochs=30, verbose=2, batch_size=32, validation_split=0.2) # encoder.output keras.utils.plot_model(encoder, "encoder.png", show_shapes=True) (tX0, tX1), (ty0, ty1, ty2, ty3) = data_loader(test_df, (150, 150, 3)) test_scores = encoder.evaluate(tX0, [ty0, ty1, ty2, ty3], verbose=2) print("Test loss:", test_scores[0]) print("Test accuracy:", test_scores[1]) p1, p2, p3 = encoder.predict([tf.expand_dims(tX0[0], 0), tf.expand_dims(tX1[0], 0)]) # print(p0);ty0[0] print(p1);ty1[0] print(p2.argmax());ty2[0].argmax() print(p3.argmax());ty3[0].argmax() Cattle are commonly raised as livestock for meat (beef or veal, see beef cattle), for milk (see dairy cattle), and for hides, which are used to make leather. They are used as riding animals and draft animals (oxen or bullocks, which pull carts, plows and other implements). Another product of cattle is their dung, which can be used to create manure or fuel. ```
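Since the classification heads return encoded integers, it can help to map a prediction back to the original category names. Below is a minimal sketch (my addition, not part of the notebook) that reuses the `LabelEncoder` objects fitted in `label_encode()`; the names `p_weight` … `p_breed` are illustrative only.

```
# Hedged sketch: decode the class predictions back to their original labels.
# Assumes breed_le / age_in_year_le and the inverse_transform() helper defined
# above are still in scope. Variable names are illustrative, not from the notebook.
p_weight, p_height, p_age, p_breed = encoder.predict(tf.expand_dims(tX0[0], 0))

print("predicted age bucket:", inverse_transform(age_in_year_le, [p_age.argmax()]))
print("predicted breed     :", inverse_transform(breed_le, [p_breed.argmax()]))
```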
github_jupyter
## 7. Fourier-transform method, FFTMethod

The steps of the evaluation are:

**loading &rarr; preprocessing &rarr; IFFT &rarr; windowing &rarr; FFT &rarr; phase**

In the program, functions with similar names have to be called. The recommended order of the function calls is the flowchart above, since it is not guaranteed that the axes will transform correctly after function calls in an arbitrary order. I start the demonstration with simulated examples, then move on to evaluating a measured interferogram as well.

```
import numpy as np
import matplotlib.pyplot as plt
import pysprint as ps

g = ps.Generator(1, 4, 2.5, 1500, GDD=400, TOD=400, FOD=1000, pulse_width=4, resolution=0.05)
g.generate_freq()
```

#### 7.1 Automatic evaluation

```
f = ps.FFTMethod(*g.data)
f.autorun(reference_point=2.5, order=4)
```

Another automatic evaluation (on the same interferogram), this time we only get the phase back. We cut off the edges of this phase curve with the `slice` function, then use the `fit` method to compute the dispersion coefficients.

```
f2 = ps.FFTMethod(*g.data)
phase = f.autorun(show_graph=False, enable_printing=False)
print(type(phase))
phase.slice(1.1, 3.9)
phase.fit(reference_point=2.5, order=4);
```

Although it was clear that the program determined the parameters of the Gaussian window function well, and thereby the dispersion coefficients too, it warned that the peak to be cut out is too close to the origin, so it is better to set it manually. Let's look at the phase together with the fitted curve:

```
phase.plot()
```

Then the fitting error:

```
phase.errorplot(percent=True)
```

#### 7.2 Manual evaluation

Let's look at a manual evaluation. Here I will use an interferogram I have at hand, which is, to put it mildly, not ideal for the Fourier-transform evaluation, but I will try to get as much usable information out of it as possible. Since I already know how the window function should be set, I will use the so-called `inplace=False` argument here. By default, every function we call is executed with `inplace=True`, that is, it modifies the object itself. This is how, for example, the `append` function works on Python lists:

```python
>>> a = []
>>> a.append(1)
>>> print(a)
[1]
```

For many functions in the package it is possible to pass the `inplace=False` argument, which does not modify the object itself but returns a new copy of it and performs the requested function on that copy. This has two advantages. The original object (and with it every originally loaded data series) is preserved, so instead of loading the data into another object again and again, one object is enough. The second advantage is that it allows chaining of operations, as the example below shows. ([fluent interfacing](https://en.wikipedia.org/wiki/Fluent_interface) and [method cascading](https://en.wikipedia.org/wiki/Method_cascading))

Here I carried out the usual evaluation steps. The last function I called is `build_phase`, which returns a phase, so after the long chain that will be the returned value (I named it `phase3`).

```
f3 = ps.FFTMethod.parse_raw('datasets/ifg.trt', skiprows=8, meta_len=8, decimal=",", delimiter=";")

phase3 = (
    f3.chdomain(inplace=False)
    .ifft(inplace=False)
    .window(at=145, fwhm=240, window_order=16, inplace=False)
    .apply_window(inplace=False)
    .fft(inplace=False)
    .build_phase()
)
```

Here, for better readability, I started a new line for each new function and wrapped the whole chain in parentheses.
Without these it would look like this:

```
phase4 = f3.chdomain(inplace=False).ifft(inplace=False).window(at=145, fwhm=240, window_order=16, plot=False, inplace=False).apply_window(inplace=False).fft(inplace=False).build_phase()
```

Since the interferogram was not ideal, let's examine what phase we got back.

```
phase3.plot()
```

Here it is noticeable that there are parts of the curve that really do contain the phase properties of the sample. Let's cut out this part with the `slice` function.

```
phase3.slice(1.71, 2.72)
phase3.plot()
```

Then let's perform the fit with the `fit` function:

```
phase3.fit(reference_point=2.355, order=3);
```

The obtained dispersion coefficients indeed reflect, to a good approximation, the coefficients characteristic of the sample (already determined with other methods). Let's also examine the fitting error:

```
phase3.errorplot(percent=True)
```

The same evaluation done the traditional way, without the `inplace=False` parameters, looks like this:

```
f4 = ps.FFTMethod.parse_raw('datasets/ifg.trt', skiprows=8, meta_len=8, decimal=",", delimiter=";")
f4.chdomain()
f4.ifft()
f4.window(at=145, fwhm=240, window_order=16, plot=False)
f4.apply_window()
f4.fft()
phase4 = f4.build_phase()
phase4.slice(1.71, 2.72)
phase4.fit(2.355, 3);
```

Let's try to compute the temporal shape of the pulse. For this I will use the `get_pulse_shape_from_file` function, to which I pass the spectrum of the sample arm.

```
x_t, y_t = f4.get_pulse_shape_from_file("datasets/sam.trt", truncate=True, chdomain=True, skiprows=8, decimal=",", sep=";")

plt.plot(x_t, y_t)
plt.grid()
plt.xlabel("t [fs]");
```

Since the interferogram used was not ideal, the pulse shape could not be recovered perfectly here.

By default, a few things are hidden from the user. I run the previous `get_pulse_shape_from_file` function again, this time with full logging output. We almost never need this; it is justified here only for the sake of explanation.

```
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)

x_t, y_t = f4.get_pulse_shape_from_file("datasets/sam.trt", truncate=True, chdomain=True, skiprows=8, decimal=",", sep=";")
```

It can be seen that between *2.322* and *2.719 PHz* the value of the expression $I(t) = |\mathcal{F}^{-1}\{\sqrt{|I_{sample}(\omega)|}\cdot e^{-i\Phi{(\omega)}}\}|^2$ could be computed at 287 data points. This is because the computed phase could not be used over a rather large range (originally we kept only the *1.71 - 2.72* *PHz* range), and the numerical error introduced by the transformations also played a role.

#### 7.3 NUFFT

Finally, the use of the non-uniform FFT. Here I do exactly the same as above, only with the `usenifft=True` argument.

```
# just resetting the log level to the default so it does not flood the screen
logger.setLevel(logging.ERROR)

f5 = ps.FFTMethod.parse_raw('datasets/ifg.trt', skiprows=8, meta_len=8, decimal=",", delimiter=";")
f5.chdomain()
f5.ifft(usenifft=True)
f5.window(at=155, fwhm=260, window_order=16, plot=False)
f5.apply_window()
f5.fft()
phase6 = f5.build_phase()
phase6.slice(None, 2.49)
phase6.fit(2.355, 3);
phase6.plot()
```

Based on the simulations, the NUFFT gives a somewhat less accurate result than interpolation + FFT.
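For completeness, a minimal sketch (my addition, not from the notebook) of what the "interpolation + FFT" route amounts to: resample the unevenly spaced frequency axis onto a uniform grid so that a plain IFFT is valid. `omega_nonuniform` and `intensity` are assumed 1-D arrays holding the interferogram after the wavelength-to-frequency change of domain.

```
import numpy as np

# np.interp needs an increasing grid, so sort the frequency axis first.
order = np.argsort(omega_nonuniform)
omega_sorted, intensity_sorted = omega_nonuniform[order], intensity[order]

# Resample onto an evenly spaced axis with the same number of points.
omega_uniform = np.linspace(omega_sorted[0], omega_sorted[-1], omega_sorted.size)
intensity_uniform = np.interp(omega_uniform, omega_sorted, intensity_sorted)

# Now an ordinary IFFT is justified -- the same role f4.ifft() plays above.
pseudo_time_signal = np.fft.ifft(intensity_uniform)
```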
github_jupyter
``` # our usual things! %matplotlib inline import pandas as pd import matplotlib.pyplot as plt import numpy as np # weather in Champaign! w = pd.read_csv("/Users/jillnaiman1/Downloads/2018_ChampaignWeather.csv") w # sort by date w.sort_values(by='DATE') # w is our pandas dataframe, sort_values is a pandas call type(w['DATE']) w['DATE'] = pd.to_datetime(w['DATE']) # changing to datetime format # lets just look at 1 station mask = w['NAME'] == 'CHAMPAIGN 3 S, IL US' mask # minium temperature during a day of 2018 plt.plot(w['DATE'][mask], w['TMIN'][mask], label='Min Temp') plt.plot(w['DATE'][mask], w['TMAX'][mask], label='Max Temp') # label our axes plt.xlabel('Date') plt.ylabel('Temp in F') plt.legend() # make our plots a bit bigger plt.rcParams['figure.dpi'] = 100 ``` # Histograms & Rolling Averages ``` mean_temp = 0.5*(w['TMIN']+w['TMAX'])[mask] import ipywidgets # interactivity # make our data look less noisy with rolling averages @ipywidgets.interact(window=(1,40,1)) def make_plot(window): mean_temp_avg = mean_temp.rolling(window=window).mean() plt.plot(mean_temp, marker='.', linewidth=0.5, alpha=0.5) plt.plot(mean_temp_avg, marker='.', linewidth=1.5) plt.xlabel('Date') plt.ylabel('Mean Daily Temp in F') w.keys() precp = w['PRCP'][mask] # we want to format our dates correctly import matplotlib.dates as mdates set_ind = False for k in w.keys(): if k.find('DATE') != -1: # have we indexed by date yet? set_ind = True if set_ind: w.set_index('DATE', inplace=True) #w['PRCP'] #w names = ['SteveBob', 'Jerry', 'Frank'] for n in names: print(n.find('Bob')) # because we have re-indexed, lets redefine our arrays mask = w['NAME'] == 'CHAMPAIGN 3 S, IL US' mean_temp = 0.5*(w['TMIN']+w['TMAX'])[mask] precp = w['PRCP'][mask] @ipywidgets.interact(window=(1,60,2)) def make_plot(window): fig, ax = plt.subplots(1,2, figsize=(10,4)) # (2) This was right-handed binning #mean_temp_avg = mean_temp.rolling(window=window).mean() mean_temp_avg = mean_temp.rolling(window=window, center=True).mean() mean_temp.plot(ax=ax[0]) # using pandas to plot mean_temp_avg.plot(ax=ax[0]) # (1) We tried this, but its not highlighting what # we want #precp_avg = precp.rolling(window=window).mean() # (2) This was right-handed binning #precp_avg = precp.rolling(window=window).sum() precp_avg = precp.rolling(window=window, center=True).sum() precp.plot(ax=ax[1], marker='.', linewidth=0.5, alpha=0.5) precp_avg.plot(ax=ax[1], marker='.', linewidth=1.5) ax[1].set_xlabel('Date') ax[1].set_ylabel('Daily rainfall in inches') ax[0].set_xlabel('Date') ax[0].set_ylabel('Mean Daily Temp in F') precp.rolling? 
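# (Added aside, not in the original notebook.) By default rolling() returns NaN
# for the first window-1 points; min_periods=1 keeps values at the edges, and
# center=True lines each average up with the middle of its window. Window size
# here is illustrative.
mean_temp.rolling(window=14, center=True, min_periods=1).mean().plot()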
# now lets look at a binning example for our rainfall data # this is a strict histogram/rebinning exercise, NOT smoothing # Note: rolling averages/sums as we've been using, is somewhere # between smoothing & binning @ipywidgets.interact(window=(1,60,1), day_bins=(1,100,5)) def make_plot(window,day_bins): fig, ax = plt.subplots(1,2,figsize=(10,4)) mean_temp_avg = mean_temp.rolling(window=window,center=True).mean() mean_temp.plot(ax=ax[0]) mean_temp_avg.plot(ax=ax[0]) precp.plot(ax=ax[1], marker='.', linewidth=0.5, alpha=0.5) precp_resampled = precp.resample(str(day_bins)+'D').sum() # day_bins = 5 => '5D', resampling by months is 'M' precp_resampled.plot(ax=ax[1], marker='.') ax[1].set_xlabel('Date') ax[1].set_ylabel('Summed Rainfall over ' + str(day_bins) + 'days, in Inches') ax[0].set_xlabel('Date') ax[0].set_ylabel('Mean Daily Temp in F') ``` ## Take aways * rolling averages => smoothing-lite, or fancy binning => like on our left temp plot * on the right rainfall plot => HISTOGRAM * so, in binning or histograming we are more "truthful" to the originial data, where in smoothing we can "double count" data points across bins ## Quick look at windowing Toy example first ``` # window of 10 bins, constant data in the window npoints = 10 x = np.arange(0,npoints) y = np.repeat(1,npoints) plt.plot(x,y,'o', label='Original Data') # so lets say we really want to highlight the center # bins plt.plot(x,y*np.bartlett(npoints),'o', label='Bartlett') # also another type of window plt.plot(x,y*np.hamming(npoints),'o',label='Hamming') plt.legend() # plot available windows windows_avail = [None,'boxcar','triang','blackman','hamming', 'bartlett','parzen', 'bohman', 'blackmanharris','nuttall','barthann'] @ipywidgets.interact(window=(1,100,1), window_type=windows_avail) def make_plot(window, window_type): plt.plot(mean_temp) mean_temp_avg = mean_temp.rolling(window=window,center=True,win_type=window_type).mean() plt.plot(mean_temp_avg) ``` # Similar binning & Smoothing in 2D ``` ufos = pd.read_csv("/Users/jillnaiman1/Downloads/ufo-scrubbed-geocoded-time-standardized-00.csv", names = ["date", "city", "state", "country", "shape", "duration_seconds", "duration", "comment", "report_date", "latitude", "longitude"], parse_dates = ["date", "report_date"]) # if you get a memory error, don't panic! ufos # quick plot plt.plot(ufos['longitude'], ufos['latitude'],'.') # colormaps import matplotlib.cm as cm plt.colormaps() plt.scatter(ufos['longitude'],ufos['latitude'],c=np.log10(ufos['duration_seconds'])) plt.scatter(ufos['longitude'][0:10], ufos['latitude'][0:10],c=ufos['duration_seconds'][0:10]) # our data is hard to see, lets try some rebinning in 2D plt.hexbin(ufos["longitude"], ufos["latitude"], ufos["duration_seconds"], gridsize=32,bins='log') # almost the exact same thing we did with histograms before in 1D for our rainfall data # can also smooth 2D images import PIL.Image as Image im = Image.open('/Users/jillnaiman1/Downloads/stitch_reworked.png') fig,ax = plt.subplots(figsize=(5,5)) ax.imshow(im) import PIL.ImageFilter as ImageFilter myFilter = ImageFilter.GaussianBlur(radius=1) smoothed_image = im.filter(myFilter) fig,ax = plt.subplots(figsize=(5,5)) ax.imshow(smoothed_image) data_im = np.array(im) np.unique(data_im) data_sm = np.array(smoothed_image) np.unique(data_sm) ```
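As a rough sketch of what `hexbin` is doing under the hood (my addition; the column names follow the `ufos` dataframe above, and the bin count is illustrative):

```
import numpy as np
import matplotlib.pyplot as plt

# 2D binning by hand: count UFO reports into a 64x64 longitude/latitude grid,
# then show the (log-scaled) counts as an image -- the rectangular cousin of hexbin.
counts, xedges, yedges = np.histogram2d(ufos["longitude"], ufos["latitude"], bins=64)
plt.imshow(np.log10(counts.T + 1), origin="lower",
           extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]],
           aspect="auto")
plt.colorbar(label="log10(number of reports + 1)")
```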
github_jupyter
# GeoNet FDSN webservice with Obspy demo - Station Service

This demo introduces some simple code that requests data using [GeoNet's FDSN webservices](http://www.geonet.org.nz/data/tools/FDSN) and the [obspy module](https://github.com/obspy/obspy/wiki) in Python. This notebook uses Python 3.

### Getting Started - Import Modules

```
from obspy import UTCDateTime
from obspy.clients.fdsn import Client as FDSN_Client
from obspy import read_inventory
```

### Define GeoNet FDSN client

```
client = FDSN_Client("GEONET")
```

## Accessing Station Metadata

Use the **station** service to access station metadata from GeoNet stations. Note that the metadata provided is predominantly associated with data types available from the FDSN archive, and therefore does not include things such as geodetic station information.

This example gets all stations that are operating at the time of the Kaikoura earthquake and that are located within a 0.5 degree radius of the epicentre. It lists the station codes and plots them on a map.

```
inventory = client.get_stations(latitude=-42.693,longitude=173.022,maxradius=0.5,
                                starttime = "2016-11-13 11:05:00.000",endtime = "2016-11-14 11:00:00.000")
print(inventory)
_=inventory.plot(projection="local")
```

The following examples dive into retrieving different information from the inventory object. This object is based on FDSN StationXML and therefore can provide much the same information. To get all available information into the inventory you will want to request data down to the response level; by default, information is requested only to the station level. For more information, see the [obspy inventory class](http://docs.obspy.org/packages/obspy.core.inventory.html#module-obspy.core.inventory).

This example gets data from a station, KUZ, and prints a summary of the inventory contents.

```
inventory = client.get_stations(station="KUZ",level="response",
                                starttime = "2016-11-13 11:05:00.000",endtime = "2016-11-14 11:00:00.000")
print(inventory)
```

Now we can look at more specific information about the station, such as the time it opened and its location.

```
network = inventory[0]
station = network[0]  # equivalent to inventory[0][0]
num_channels = len(station)
print(station)
```

We can drill down even further into a particular channel and look at the time it was operating for, whether it was continuously recording, the sample rate and some basic sensor information.

```
channel = station[0]  # equivalent to inventory[0][0][0]
print(channel)
```

This channel states that there is response information available, so we can look at a summary of the response and plot it.

```
resp = channel.response
print(resp)
resp.plot(0.001,output="VEL",label='KUZ HHZ')
```
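A natural follow-on (my addition, not part of the GeoNet demo) is to use the same FDSN client to fetch waveform data and correct it with the response information we just inspected. The network, channel codes and time window below are assumptions for illustration.

```
from obspy import UTCDateTime
from obspy.clients.fdsn import Client as FDSN_Client

client = FDSN_Client("GEONET")

# Ten minutes of vertical broadband data from KUZ around the Kaikoura origin time.
t0 = UTCDateTime("2016-11-13T11:02:56")
st = client.get_waveforms("NZ", "KUZ", "*", "HHZ", t0, t0 + 600, attach_response=True)

# Use the attached response metadata to convert counts to ground velocity.
st.remove_response(output="VEL")
st.plot()
```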
github_jupyter
``` from keras import applications from keras.models import Sequential, Model from keras.models import Model from keras.layers import Dropout, Flatten, Dense, Activation, Reshape from keras.callbacks import CSVLogger import tensorflow as tf from scipy.ndimage import imread import numpy as np import random from keras.layers import GRU, CuDNNGRU, LSTM, Input from keras.layers import Conv1D, MaxPooling1D from keras.layers.advanced_activations import LeakyReLU from keras import backend as K import keras from keras.callbacks import CSVLogger, ModelCheckpoint from keras.backend.tensorflow_backend import set_session from keras import optimizers import h5py from sklearn.preprocessing import MinMaxScaler import os import pandas as pd import matplotlib.pyplot as plt import h5py with h5py.File('../Data/' + ''.join(['BTC.h5']), 'r') as hf: datas = hf['inputs'].value labels = hf['outputs'].value input_times = hf['input_times'].value output_times = hf['output_times'].value original_inputs = hf['original_inputs'].value original_outputs = hf['original_outputs'].value original_datas = hf['original_datas'].value scaler=MinMaxScaler((-1, 1)) #split training validation # training_size = int(0.8* datas.shape[0]) training_size = datas.shape[0] - 1 training_datas = datas[:training_size,:,:] training_labels = labels[:training_size,:,:] validation_datas = datas[training_size:,:,:] validation_labels = labels[training_size:,:,:] validation_original_outputs = original_outputs[training_size:,:,:] validation_original_inputs = original_inputs[training_size:,:,:] validation_input_times = input_times[training_size:,:,:] validation_output_times = output_times[training_size:,:,:] validation_size = datas.shape[0] - training_size training_labels = [np.array(training_labels[:, :, 0]).reshape((training_size, -1)), np.array(training_labels[:, :, 1]).reshape((training_size, -1)), np.array(training_labels[:, :, 2]).reshape((training_size, -1))] validation_labels = [np.array(validation_labels[:, :, 0]).reshape((validation_size, -1)), np.array(validation_labels[:, :, 1]).reshape((validation_size, -1)), np.array(validation_labels[:, :, 2]).reshape((validation_size, -1))] os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID' os.environ['CUDA_VISIBLE_DEVICES'] = '0' os.environ['TF_CPP_MIN_LOG_LEVEL']='2' config = tf.ConfigProto() config.gpu_options.allow_growth = True set_session(tf.Session(config=config)) ground_true = np.append(validation_original_inputs,validation_original_outputs, axis=1) ground_true_times = np.append(validation_input_times,validation_output_times, axis=1) print(ground_true_times.shape) print(ground_true.shape) step_size = datas.shape[1] batch_size = 8 n_features = datas.shape[2] epochs = 1 output_size = 12 units = 150 # model = Sequential() # model.add(GRU(units=units, activation=None, input_shape=(step_size,nb_features),return_sequences=False)) # model.add(Activation('tanh')) # model.add(Dropout(0.2)) # model.add(Dense(output_size, activation="linear")) # model.add(LeakyReLU(alpha=0.001)) # model.load_weights('../weights/BTC_GRU_1_tanh_relu-49-0.00001.hdf5') # model.compile(loss='mape', optimizer='adam') input_layer = Input(shape=(step_size, n_features)) layer_1 = GRU(units=units, return_sequences=True)(input_layer) layer_1 = Dropout(0.5)(layer_1) layer_2 = GRU(units=units, return_sequences=False)(layer_1) layer_2 = Dropout(0.5)(layer_2) output_1 = Dense(output_size, activation="tanh", name="close_dense")(layer_2) output_2 = Dense(output_size, activation="tanh", name="high_dense")(layer_2) output_3 = Dense(output_size, 
activation="tanh", name="low_dense")(layer_2) model = Model(inputs=input_layer, outputs=[output_1, output_2, output_3]) model.load_weights('../weights/BTC_GRU_1_tanh_relu-209-0.00000034.hdf5') model.compile(optimizer="adam", loss=["mse", "mse", "mse"], loss_weights=[0.001, 0.001, 0.001]) predicted = np.array(model.predict(validation_datas)) print(predicted.shape) predicted = predicted.reshape((predicted.shape[1] * predicted.shape[2], predicted.shape[0])) predicted_inverted = [] predicted.shape scaler.fit(original_datas.reshape(-1, n_features)) # predicted_inverted.append(scaler.inverse_transform(predicted)) predicted_inverted = scaler.inverse_transform(predicted[:, :]) print(np.array(predicted_inverted).shape) #get only the close data ground_true = ground_true[:, :, :].reshape(-1, n_features) ground_true_times = ground_true_times.reshape(-1) ground_true_times = pd.to_datetime(ground_true_times, unit='s') # since we are appending in the first dimension # predicted_inverted = np.array(predicted_inverted)[0,:,:].reshape(-1) print(np.array(predicted_inverted).shape) validation_output_times = pd.to_datetime(validation_output_times.reshape(-1), unit='s') predicted_inverted[:, 0] validation_output_times.shape ground_true_df = pd.DataFrame() ground_true_df['times'] = ground_true_times ground_true_df['close'] = ground_true[:, 0] ground_true_df['high'] = ground_true[:, 1] ground_true_df['low'] = ground_true[:, 2] ground_true_df.set_index('times').reset_index() ground_true_df.shape prediction_df = pd.DataFrame() prediction_df['times'] = validation_output_times prediction_df['close'] = predicted_inverted[:, 0] prediction_df['high'] = predicted_inverted[:, 1] prediction_df['low'] = predicted_inverted[:, 2] prediction_df.shape prediction_df = prediction_df.loc[(prediction_df["times"].dt.year == 2018 )&(prediction_df["times"].dt.month >= 7 ),: ] # ground_true_df = ground_true_df.loc[(ground_true_df["times"].dt.year >= 2017 )&(ground_true_df["times"].dt.month > 7 ),:] ground_true_df = ground_true_df.loc[:,:] start_idx = 350 plt.figure(figsize=(20,10)) plt.plot(ground_true_df.times[start_idx:],ground_true_df.close[start_idx:], label = 'Actual Close') # plt.plot(ground_true_df.times[start_idx:],ground_true_df.high[start_idx:], label = 'Actual High') # plt.plot(ground_true_df.times[start_idx:],ground_true_df.low[start_idx:], label = 'Actual Low') plt.plot(prediction_df.times,prediction_df.high,'g-', label='Predicted High') plt.plot(prediction_df.times,prediction_df.close,'r-', label='Predicted Close') plt.plot(prediction_df.times,prediction_df.low,'b-', label='Predicted Low') plt.legend(loc='upper left') plt.grid() plt.title("Predicted USD for last 7 days from " + str(ground_true_df["times"].dt.date.iloc[-12]) + " to " + str(ground_true_df["times"].dt.date.iloc[-1])) plt.savefig('../Results/BTC/New/BTC_close_GRU_1_tanh_relu_result.png') plt.show() from sklearn.metrics import mean_squared_error mean_squared_error(validation_original_outputs[:,:,0].reshape(-1),predicted_inverted[:, 0]) ```
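The cell above scores only the close column; a small sketch (my addition, assuming `validation_original_outputs` and `predicted_inverted` are still in memory with the shapes used earlier) that reports errors for all three predicted series:

```
import numpy as np
from sklearn.metrics import mean_squared_error, mean_absolute_error

# Compare each inverse-scaled prediction column against the true series.
for i, name in enumerate(['close', 'high', 'low']):
    y_true = validation_original_outputs[:, :, i].reshape(-1)
    y_pred = predicted_inverted[:, i]
    rmse = np.sqrt(mean_squared_error(y_true, y_pred))
    mae = mean_absolute_error(y_true, y_pred)
    print(f"{name}: RMSE={rmse:.2f} USD, MAE={mae:.2f} USD")
```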
github_jupyter
Thanks for @christofhenkel @abhishek @iezepov for their great work: https://www.kaggle.com/christofhenkel/how-to-preprocessing-for-glove-part2-usage https://www.kaggle.com/abhishek/pytorch-bert-inference https://www.kaggle.com/iezepov/starter-gensim-word-embeddings ``` import sys package_dir = "../input/ppbert/pytorch-pretrained-bert/pytorch-pretrained-BERT" sys.path.append(package_dir) from __future__ import absolute_import from __future__ import division from __future__ import print_function %reload_ext autoreload %autoreload 2 %matplotlib inline import fastai from fastai.train import Learner from fastai.train import DataBunch from fastai.callbacks import TrainingPhase, GeneralScheduler from fastai.basic_data import DatasetType import fastprogress from fastprogress import force_console_behavior import numpy as np from pprint import pprint import pandas as pd import os import time import gc import random from tqdm._tqdm_notebook import tqdm_notebook as tqdm from keras.preprocessing import text, sequence import torch from torch import nn from torch.utils import data from torch.nn import functional as F import torch.utils.data from tqdm import tqdm import warnings from pytorch_pretrained_bert import BertTokenizer, BertForSequenceClassification, BertAdam from pytorch_pretrained_bert import BertConfig from nltk.tokenize.treebank import TreebankWordTokenizer from gensim.models import KeyedVectors def convert_lines(example, max_seq_length,tokenizer): max_seq_length -=2 all_tokens = [] longer = 0 for text in tqdm(example): tokens_a = tokenizer.tokenize(text) if len(tokens_a)>max_seq_length: tokens_a = tokens_a[:max_seq_length] longer += 1 one_token = tokenizer.convert_tokens_to_ids(["[CLS]"]+tokens_a+["[SEP]"])+[0] * (max_seq_length - len(tokens_a)) all_tokens.append(one_token) return np.array(all_tokens) def is_interactive(): return 'SHLVL' not in os.environ def seed_everything(seed=123): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True def get_coefs(word, *arr): return word, np.asarray(arr, dtype='float32') def load_embeddings(path): #with open(path,'rb') as f: emb_arr = KeyedVectors.load(path) return emb_arr def build_matrix(word_index, path): embedding_index = load_embeddings(path) embedding_matrix = np.zeros((max_features + 1, 300)) unknown_words = [] for word, i in word_index.items(): if i <= max_features: try: embedding_matrix[i] = embedding_index[word] except KeyError: try: embedding_matrix[i] = embedding_index[word.lower()] except KeyError: try: embedding_matrix[i] = embedding_index[word.title()] except KeyError: unknown_words.append(word) return embedding_matrix, unknown_words def sigmoid(x): return 1 / (1 + np.exp(-x)) class SpatialDropout(nn.Dropout2d): def forward(self, x): x = x.unsqueeze(2) # (N, T, 1, K) x = x.permute(0, 3, 2, 1) # (N, K, 1, T) x = super(SpatialDropout, self).forward(x) # (N, K, 1, T), some features are masked x = x.permute(0, 3, 2, 1) # (N, T, 1, K) x = x.squeeze(2) # (N, T, K) return x def train_model(learn,test,output_dim,lr=0.001, batch_size=512, n_epochs=4, enable_checkpoint_ensemble=True): all_test_preds = [] checkpoint_weights = [2 ** epoch for epoch in range(n_epochs)] test_loader = torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=False) n = len(learn.data.train_dl) phases = [(TrainingPhase(n).schedule_hp('lr', lr * (0.6**(i)))) for i in range(n_epochs)] sched = GeneralScheduler(learn, phases) 
learn.callbacks.append(sched) for epoch in range(n_epochs): learn.fit(1) test_preds = np.zeros((len(test), output_dim)) for i, x_batch in enumerate(test_loader): X = x_batch[0].cuda() y_pred = sigmoid(learn.model(X).detach().cpu().numpy()) test_preds[i * batch_size:(i+1) * batch_size, :] = y_pred all_test_preds.append(test_preds) if enable_checkpoint_ensemble: test_preds = np.average(all_test_preds, weights=checkpoint_weights, axis=0) else: test_preds = all_test_preds[-1] return test_preds def handle_punctuation(x): x = x.translate(remove_dict) x = x.translate(isolate_dict) return x def handle_contractions(x): x = tokenizer.tokenize(x) return x def fix_quote(x): x = [x_[1:] if x_.startswith("'") else x_ for x_ in x] x = ' '.join(x) return x def preprocess(x): x = handle_punctuation(x) x = handle_contractions(x) x = fix_quote(x) return x class SequenceBucketCollator(): def __init__(self, choose_length, sequence_index, length_index, label_index=None): self.choose_length = choose_length self.sequence_index = sequence_index self.length_index = length_index self.label_index = label_index def __call__(self, batch): batch = [torch.stack(x) for x in list(zip(*batch))] sequences = batch[self.sequence_index] lengths = batch[self.length_index] length = self.choose_length(lengths) mask = torch.arange(start=maxlen, end=0, step=-1) < length padded_sequences = sequences[:, mask] batch[self.sequence_index] = padded_sequences if self.label_index is not None: return [x for i, x in enumerate(batch) if i != self.label_index], batch[self.label_index] return batch class NeuralNet(nn.Module): def __init__(self, embedding_matrix, num_aux_targets): super(NeuralNet, self).__init__() embed_size = embedding_matrix.shape[1] self.embedding = nn.Embedding(max_features, embed_size) self.embedding.weight = nn.Parameter(torch.tensor(embedding_matrix, dtype=torch.float32)) self.embedding.weight.requires_grad = False self.embedding_dropout = SpatialDropout(0.3) self.lstm1 = nn.LSTM(embed_size, LSTM_UNITS, bidirectional=True, batch_first=True) self.lstm2 = nn.LSTM(LSTM_UNITS * 2, LSTM_UNITS, bidirectional=True, batch_first=True) self.linear1 = nn.Linear(DENSE_HIDDEN_UNITS, DENSE_HIDDEN_UNITS) self.linear2 = nn.Linear(DENSE_HIDDEN_UNITS, DENSE_HIDDEN_UNITS) self.linear_out = nn.Linear(DENSE_HIDDEN_UNITS, 1) self.linear_aux_out = nn.Linear(DENSE_HIDDEN_UNITS, num_aux_targets) def forward(self, x, lengths=None): h_embedding = self.embedding(x.long()) h_embedding = self.embedding_dropout(h_embedding) h_lstm1, _ = self.lstm1(h_embedding) h_lstm2, _ = self.lstm2(h_lstm1) # global average pooling avg_pool = torch.mean(h_lstm2, 1) # global max pooling max_pool, _ = torch.max(h_lstm2, 1) h_conc = torch.cat((max_pool, avg_pool), 1) h_conc_linear1 = F.relu(self.linear1(h_conc)) h_conc_linear2 = F.relu(self.linear2(h_conc)) hidden = h_conc + h_conc_linear1 + h_conc_linear2 result = self.linear_out(hidden) aux_result = self.linear_aux_out(hidden) out = torch.cat([result, aux_result], 1) return out def custom_loss(data, targets): bce_loss_1 = nn.BCEWithLogitsLoss(weight=targets[:,1:2])(data[:,:1],targets[:,:1]) bce_loss_2 = nn.BCEWithLogitsLoss()(data[:,1:],targets[:,2:]) return (bce_loss_1 * loss_weight) + bce_loss_2 def reduce_mem_usage(df): start_mem = df.memory_usage().sum() / 1024**2 print('Memory usage of dataframe is {:.2f} MB'.format(start_mem)) for col in df.columns: col_type = df[col].dtype if col_type != object: c_min = df[col].min() c_max = df[col].max() if str(col_type)[:3] == 'int': if c_min > np.iinfo(np.int8).min and c_max 
< np.iinfo(np.int8).max: df[col] = df[col].astype(np.int8) elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max: df[col] = df[col].astype(np.int16) elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max: df[col] = df[col].astype(np.int32) elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max: df[col] = df[col].astype(np.int64) else: if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max: df[col] = df[col].astype(np.float16) elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max: df[col] = df[col].astype(np.float32) else: df[col] = df[col].astype(np.float64) else: df[col] = df[col].astype('category') end_mem = df.memory_usage().sum() / 1024**2 print('Memory usage after optimization is: {:.2f} MB'.format(end_mem)) print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem)) return df warnings.filterwarnings(action='once') device = torch.device('cuda') MAX_SEQUENCE_LENGTH = 300 SEED = 1234 BATCH_SIZE = 512 BERT_MODEL_PATH = '../input/bert-pretrained-models/uncased_l-12_h-768_a-12/uncased_L-12_H-768_A-12/' np.random.seed(SEED) torch.manual_seed(SEED) torch.cuda.manual_seed(SEED) torch.backends.cudnn.deterministic = True bert_config = BertConfig('../input/arti-bert-inference/bert/bert_config.json') tokenizer = BertTokenizer.from_pretrained(BERT_MODEL_PATH, cache_dir=None,do_lower_case=True) tqdm.pandas() CRAWL_EMBEDDING_PATH = '../input/gensim-embeddings-dataset/crawl-300d-2M.gensim' GLOVE_EMBEDDING_PATH = '../input/gensim-embeddings-dataset/glove.840B.300d.gensim' NUM_MODELS = 2 LSTM_UNITS = 128 DENSE_HIDDEN_UNITS = 4 * LSTM_UNITS MAX_LEN = 220 if not is_interactive(): def nop(it, *a, **k): return it tqdm = nop fastprogress.fastprogress.NO_BAR = True master_bar, progress_bar = force_console_behavior() fastai.basic_train.master_bar, fastai.basic_train.progress_bar = master_bar, progress_bar seed_everything() ``` **BERT Part** ``` test_df = pd.read_csv("../input/jigsaw-unintended-bias-in-toxicity-classification/test.csv") test_df['comment_text'] = test_df['comment_text'].astype(str) X_test = convert_lines(test_df["comment_text"].fillna("DUMMY_VALUE"), MAX_SEQUENCE_LENGTH, tokenizer) model = BertForSequenceClassification(bert_config, num_labels=1) model.load_state_dict(torch.load("../input/arti-bert-inference/bert/bert_pytorch.bin")) model.to(device) for param in model.parameters(): param.requires_grad = False model.eval() test_preds = np.zeros((len(X_test))) test = torch.utils.data.TensorDataset(torch.tensor(X_test, dtype=torch.long)) test_loader = torch.utils.data.DataLoader(test, batch_size=512, shuffle=False) tk0 = tqdm(test_loader) for i, (x_batch,) in enumerate(tk0): pred = model(x_batch.to(device), attention_mask=(x_batch > 0).to(device), labels=None) test_preds[i * 512:(i + 1) * 512] = pred[:, 0].detach().cpu().squeeze().numpy() test_pred = torch.sigmoid(torch.tensor(test_preds)).numpy().ravel() submission_bert = pd.DataFrame.from_dict({ 'id': test_df['id'], 'prediction': test_pred }) ``` **LSTM Part** ``` train_df = reduce_mem_usage(pd.read_csv('../input/jigsaw-unintended-bias-in-toxicity-classification/train.csv')) symbols_to_isolate = 
'.,?!-;*"…:—()%#$&_/@\・ω+=”“[]^–>\\°<~•≠™ˈʊɒ∞§{}·τα❤☺ɡ|¢→̶`❥━┣┫┗O►★©―ɪ✔®\x96\x92●£♥➤´¹☕≈÷♡◐║▬′ɔː€۩۞†μ✒➥═☆ˌ◄½ʻπδηλσερνʃ✬SUPERIT☻±♍µº¾✓◾؟.⬅℅»Вав❣⋅¿¬♫CMβ█▓▒░⇒⭐›¡₂₃❧▰▔◞▀▂▃▄▅▆▇↙γ̄″☹➡«φ⅓„✋:¥̲̅́∙‛◇✏▷❓❗¶˚˙)сиʿ✨。ɑ\x80◕!%¯−flfi₁²ʌ¼⁴⁄₄⌠♭✘╪▶☭✭♪☔☠♂☃☎✈✌✰❆☙○‣⚓年∎ℒ▪▙☏⅛casǀ℮¸w‚∼‖ℳ❄←☼⋆ʒ⊂、⅔¨͡๏⚾⚽Φ×θ₩?(℃⏩☮⚠月✊❌⭕▸■⇌☐☑⚡☄ǫ╭∩╮,例>ʕɐ̣Δ₀✞┈╱╲▏▕┃╰▊▋╯┳┊≥☒↑☝ɹ✅☛♩☞AJB◔◡↓♀⬆̱ℏ\x91⠀ˤ╚↺⇤∏✾◦♬³の|/∵∴√Ω¤☜▲↳▫‿⬇✧ovm-208'‰≤∕ˆ⚜☁' symbols_to_delete = '\n🍕\r🐵😑\xa0\ue014\t\uf818\uf04a\xad😢🐶️\uf0e0😜😎👊\u200b\u200e😁عدويهصقأناخلىبمغر😍💖💵Е👎😀😂\u202a\u202c🔥😄🏻💥ᴍʏʀᴇɴᴅᴏᴀᴋʜᴜʟᴛᴄᴘʙғᴊᴡɢ😋👏שלוםבי😱‼\x81エンジ故障\u2009🚌ᴵ͞🌟😊😳😧🙀😐😕\u200f👍😮😃😘אעכח💩💯⛽🚄🏼ஜ😖ᴠ🚲‐😟😈💪🙏🎯🌹😇💔😡\x7f👌ἐὶήιὲκἀίῃἴξ🙄H😠\ufeff\u2028😉😤⛺🙂\u3000تحكسة👮💙فزط😏🍾🎉😞\u2008🏾😅😭👻😥😔😓🏽🎆🍻🍽🎶🌺🤔😪\x08‑🐰🐇🐱🙆😨🙃💕𝘊𝘦𝘳𝘢𝘵𝘰𝘤𝘺𝘴𝘪𝘧𝘮𝘣💗💚地獄谷улкнПоАН🐾🐕😆ה🔗🚽歌舞伎🙈😴🏿🤗🇺🇸мυтѕ⤵🏆🎃😩\u200a🌠🐟💫💰💎эпрд\x95🖐🙅⛲🍰🤐👆🙌\u2002💛🙁👀🙊🙉\u2004ˢᵒʳʸᴼᴷᴺʷᵗʰᵉᵘ\x13🚬🤓\ue602😵άοόςέὸתמדףנרךצט😒͝🆕👅👥👄🔄🔤👉👤👶👲🔛🎓\uf0b7\uf04c\x9f\x10成都😣⏺😌🤑🌏😯ех😲Ἰᾶὁ💞🚓🔔📚🏀👐\u202d💤🍇\ue613小土豆🏡❔⁉\u202f👠》कर्मा🇹🇼🌸蔡英文🌞🎲レクサス😛外国人关系Сб💋💀🎄💜🤢َِьыгя不是\x9c\x9d🗑\u2005💃📣👿༼つ༽😰ḷЗз▱ц🤣卖温哥华议会下降你失去所有的钱加拿大坏税骗子🐝ツ🎅\x85🍺آإشء🎵🌎͟ἔ油别克🤡🤥😬🤧й\u2003🚀🤴ʲшчИОРФДЯМюж😝🖑ὐύύ特殊作戦群щ💨圆明园קℐ🏈😺🌍⏏ệ🍔🐮🍁🍆🍑🌮🌯🤦\u200d𝓒𝓲𝓿𝓵안영하세요ЖљКћ🍀😫🤤ῦ我出生在了可以说普通话汉语好极🎼🕺🍸🥂🗽🎇🎊🆘🤠👩🖒🚪天一家⚲\u2006⚭⚆⬭⬯⏖新✀╌🇫🇷🇩🇪🇮🇬🇧😷🇨🇦ХШ🌐\x1f杀鸡给猴看ʁ𝗪𝗵𝗲𝗻𝘆𝗼𝘂𝗿𝗮𝗹𝗶𝘇𝗯𝘁𝗰𝘀𝘅𝗽𝘄𝗱📺ϖ\u2000үսᴦᎥһͺ\u2007հ\u2001ɩye൦lƽh𝐓𝐡𝐞𝐫𝐮𝐝𝐚𝐃𝐜𝐩𝐭𝐢𝐨𝐧Ƅᴨןᑯ໐ΤᏧ௦Іᴑ܁𝐬𝐰𝐲𝐛𝐦𝐯𝐑𝐙𝐣𝐇𝐂𝐘𝟎ԜТᗞ౦〔Ꭻ𝐳𝐔𝐱𝟔𝟓𝐅🐋ffi💘💓ё𝘥𝘯𝘶💐🌋🌄🌅𝙬𝙖𝙨𝙤𝙣𝙡𝙮𝙘𝙠𝙚𝙙𝙜𝙧𝙥𝙩𝙪𝙗𝙞𝙝𝙛👺🐷ℋ𝐀𝐥𝐪🚶𝙢Ἱ🤘ͦ💸ج패티W𝙇ᵻ👂👃ɜ🎫\uf0a7БУі🚢🚂ગુજરાતીῆ🏃𝓬𝓻𝓴𝓮𝓽𝓼☘﴾̯﴿₽\ue807𝑻𝒆𝒍𝒕𝒉𝒓𝒖𝒂𝒏𝒅𝒔𝒎𝒗𝒊👽😙\u200cЛ‒🎾👹⎌🏒⛸公寓养宠物吗🏄🐀🚑🤷操美𝒑𝒚𝒐𝑴🤙🐒欢迎来到阿拉斯ספ𝙫🐈𝒌𝙊𝙭𝙆𝙋𝙍𝘼𝙅ﷻ🦄巨收赢得白鬼愤怒要买额ẽ🚗🐳𝟏𝐟𝟖𝟑𝟕𝒄𝟗𝐠𝙄𝙃👇锟斤拷𝗢𝟳𝟱𝟬⦁マルハニチロ株式社⛷한국어ㄸㅓ니͜ʖ𝘿𝙔₵𝒩ℯ𝒾𝓁𝒶𝓉𝓇𝓊𝓃𝓈𝓅ℴ𝒻𝒽𝓀𝓌𝒸𝓎𝙏ζ𝙟𝘃𝗺𝟮𝟭𝟯𝟲👋🦊多伦🐽🎻🎹⛓🏹🍷🦆为和中友谊祝贺与其想象对法如直接问用自己猜本传教士没积唯认识基督徒曾经让相信耶稣复活死怪他但当们聊些政治题时候战胜因圣把全堂结婚孩恐惧且栗谓这样还♾🎸🤕🤒⛑🎁批判检讨🏝🦁🙋😶쥐스탱트뤼도석유가격인상이경제황을렵게만들지않록잘관리해야합다캐나에서대마초와화약금의품런성분갈때는반드시허된사용🔫👁凸ὰ💲🗯𝙈Ἄ𝒇𝒈𝒘𝒃𝑬𝑶𝕾𝖙𝖗𝖆𝖎𝖌𝖍𝖕𝖊𝖔𝖑𝖉𝖓𝖐𝖜𝖞𝖚𝖇𝕿𝖘𝖄𝖛𝖒𝖋𝖂𝕴𝖟𝖈𝕸👑🚿💡知彼百\uf005𝙀𝒛𝑲𝑳𝑾𝒋𝟒😦𝙒𝘾𝘽🏐𝘩𝘨ὼṑ𝑱𝑹𝑫𝑵𝑪🇰🇵👾ᓇᒧᔭᐃᐧᐦᑳᐨᓃᓂᑲᐸᑭᑎᓀᐣ🐄🎈🔨🐎🤞🐸💟🎰🌝🛳点击查版🍭𝑥𝑦𝑧NG👣\uf020っ🏉ф💭🎥Ξ🐴👨🤳🦍\x0b🍩𝑯𝒒😗𝟐🏂👳🍗🕉🐲چی𝑮𝗕𝗴🍒ꜥⲣⲏ🐑⏰鉄リ事件ї💊「」\uf203\uf09a\uf222\ue608\uf202\uf099\uf469\ue607\uf410\ue600燻製シ虚偽屁理屈Г𝑩𝑰𝒀𝑺🌤𝗳𝗜𝗙𝗦𝗧🍊ὺἈἡχῖΛ⤏🇳𝒙ψՁմեռայինրւդձ冬至ὀ𝒁🔹🤚🍎𝑷🐂💅𝘬𝘱𝘸𝘷𝘐𝘭𝘓𝘖𝘹𝘲𝘫کΒώ💢ΜΟΝΑΕ🇱♲𝝈↴💒⊘Ȼ🚴🖕🖤🥘📍👈➕🚫🎨🌑🐻𝐎𝐍𝐊𝑭🤖🎎😼🕷grntidufbk𝟰🇴🇭🇻🇲𝗞𝗭𝗘𝗤👼📉🍟🍦🌈🔭《🐊🐍\uf10aლڡ🐦\U0001f92f\U0001f92a🐡💳ἱ🙇𝗸𝗟𝗠𝗷🥜さようなら🔼' tokenizer = TreebankWordTokenizer() isolate_dict = {ord(c):f' {c} ' for c in symbols_to_isolate} remove_dict = {ord(c):f'' for c in symbols_to_delete} x_train = train_df['comment_text'].progress_apply(lambda x:preprocess(x)) y_aux_train = train_df[['target', 'severe_toxicity', 'obscene', 'identity_attack', 'insult', 'threat']] x_test = test_df['comment_text'].progress_apply(lambda x:preprocess(x)) identity_columns = [ 'male', 'female', 'homosexual_gay_or_lesbian', 'christian', 'jewish', 'muslim', 'black', 'white', 'psychiatric_or_mental_illness'] # Overall weights = np.ones((len(x_train),)) / 4 # Subgroup weights += (train_df[identity_columns].fillna(0).values>=0.5).sum(axis=1).astype(bool).astype(np.int) / 4 # Background Positive, Subgroup Negative weights += (( (train_df['target'].values>=0.5).astype(bool).astype(np.int) + (train_df[identity_columns].fillna(0).values<0.5).sum(axis=1).astype(bool).astype(np.int) ) > 1 ).astype(bool).astype(np.int) / 4 # Background Negative, Subgroup Positive weights += (( (train_df['target'].values<0.5).astype(bool).astype(np.int) + (train_df[identity_columns].fillna(0).values>=0.5).sum(axis=1).astype(bool).astype(np.int) ) > 1 ).astype(bool).astype(np.int) / 4 loss_weight = 1.0 / weights.mean() y_train = np.vstack([(train_df['target'].values>=0.5).astype(np.int),weights]).T max_features = 410047 tokenizer = text.Tokenizer(num_words = max_features, filters='',lower=False) tokenizer.fit_on_texts(list(x_train) + list(x_test)) crawl_matrix, 
unknown_words_crawl = build_matrix(tokenizer.word_index, CRAWL_EMBEDDING_PATH) print('n unknown words (crawl): ', len(unknown_words_crawl)) glove_matrix, unknown_words_glove = build_matrix(tokenizer.word_index, GLOVE_EMBEDDING_PATH) print('n unknown words (glove): ', len(unknown_words_glove)) max_features = max_features or len(tokenizer.word_index) + 1 max_features embedding_matrix = np.concatenate([crawl_matrix, glove_matrix], axis=-1) embedding_matrix.shape del crawl_matrix del glove_matrix gc.collect() y_train_torch = torch.tensor(np.hstack([y_train, y_aux_train]), dtype=torch.float32) x_train = tokenizer.texts_to_sequences(x_train) x_test = tokenizer.texts_to_sequences(x_test) lengths = torch.from_numpy(np.array([len(x) for x in x_train])) maxlen = 300 x_train_padded = torch.from_numpy(sequence.pad_sequences(x_train, maxlen=maxlen)) test_lengths = torch.from_numpy(np.array([len(x) for x in x_test])) x_test_padded = torch.from_numpy(sequence.pad_sequences(x_test, maxlen=maxlen)) batch_size = 512 test_dataset = data.TensorDataset(x_test_padded, test_lengths) train_dataset = data.TensorDataset(x_train_padded, lengths, y_train_torch) valid_dataset = data.Subset(train_dataset, indices=[0, 1]) train_collator = SequenceBucketCollator(lambda lenghts: lenghts.max(), sequence_index=0, length_index=1, label_index=2) test_collator = SequenceBucketCollator(lambda lenghts: lenghts.max(), sequence_index=0, length_index=1) train_loader = data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, collate_fn=train_collator) valid_loader = data.DataLoader(valid_dataset, batch_size=batch_size, shuffle=False, collate_fn=train_collator) test_loader = data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, collate_fn=test_collator) databunch = DataBunch(train_dl=train_loader, valid_dl=valid_loader, collate_fn=train_collator) all_test_preds = [] for model_idx in range(NUM_MODELS): print('Model ', model_idx) seed_everything(1 + model_idx) model = NeuralNet(embedding_matrix, y_aux_train.shape[-1]) learn = Learner(databunch, model, loss_func=custom_loss) test_preds = train_model(learn,test_dataset,output_dim=7) all_test_preds.append(test_preds) submission_lstm = pd.DataFrame.from_dict({ 'id': test_df['id'], 'prediction': np.mean(all_test_preds, axis=0)[:, 0] }) ``` **Blending part** ``` submission = pd.read_csv('../input/jigsaw-unintended-bias-in-toxicity-classification/sample_submission.csv') submission['prediction'] = ((submission_bert.prediction + submission_lstm.prediction)/2 + submission_lstm.prediction)/2 submission.to_csv('submission.csv', index=False) ```
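An alternative worth trying (my addition, not part of the original kernel) is to blend on ranks instead of raw probabilities, which can help when the two models are calibrated differently. The weights below are illustrative, not tuned.

```
from scipy.stats import rankdata

# Convert each model's scores to normalized ranks in (0, 1], then take a weighted mean.
bert_rank = rankdata(submission_bert.prediction) / len(submission_bert)
lstm_rank = rankdata(submission_lstm.prediction) / len(submission_lstm)

submission['prediction'] = 0.6 * bert_rank + 0.4 * lstm_rank
submission.to_csv('submission_rank_blend.csv', index=False)
```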
github_jupyter
# python-sonic - Programming Music with Python, Sonic Pi or Supercollider Python-Sonic is a simple Python interface for Sonic Pi, which is a real great music software created by Sam Aaron (http://sonic-pi.net). At the moment Python-Sonic works with Sonic Pi. It is planned, that it will work with Supercollider, too. If you like it, use it. If you have some suggestions, tell me ([email protected]). ## Installation * First you need Python 3 (https://www.python.org, ) - Python 3.5 should work, because it's the development environment * Then Sonic Pi (https://sonic-pi.net) - That makes the sound * Modul python-osc (https://pypi.python.org/pypi/python-osc) - Connection between Python and Sonic Pi Server * And this modul python-sonic - simply copy the source Or try That should work. ## Limitations * You have to start _Sonic Pi_ first before you can use it with python-sonic * Only the notes from C2 to C6 ## Changelog |Version | | |--------------|------------------------------------------------------------------------------------------| | 0.2.0 | Some changes for Sonic Pi 2.11. Simpler multi-threading with decorator *@in_thread*. Messaging with *cue* and *sync*. | | 0.3.0 | OSC Communication | ## Examples Many of the examples are inspired from the help menu in *Sonic Pi*. ``` from psonic import * ``` The first sound ``` play(70) #play MIDI note 70 ``` Some more notes ``` play(72) sleep(1) play(75) sleep(1) play(79) ``` In more tratitional music notation ``` play(C5) sleep(0.5) play(D5) sleep(0.5) play(G5) ``` Play sharp notes like *F#* or dimished ones like *Eb* ``` play(Fs5) sleep(0.5) play(Eb5) ``` Play louder (parameter amp) or from a different direction (parameter pan) ``` play(72,amp=2) sleep(0.5) play(74,pan=-1) #left ``` Different synthesizer sounds ``` use_synth(SAW) play(38) sleep(0.25) play(50) sleep(0.5) use_synth(PROPHET) play(57) sleep(0.25) ``` ADSR *(Attack, Decay, Sustain and Release)* Envelope ``` play (60, attack=0.5, decay=1, sustain_level=0.4, sustain=2, release=0.5) sleep(4) ``` Play some samples ``` sample(AMBI_LUNAR_LAND, amp=0.5) sample(LOOP_AMEN,pan=-1) sleep(0.877) sample(LOOP_AMEN,pan=1) sample(LOOP_AMEN,rate=0.5) sample(LOOP_AMEN,rate=1.5) sample(LOOP_AMEN,rate=-1)#back sample(DRUM_CYMBAL_OPEN,attack=0.01,sustain=0.3,release=0.1) sample(LOOP_AMEN,start=0.5,finish=0.8,rate=-0.2,attack=0.3,release=1) ``` Play some random notes ``` import random for i in range(5): play(random.randrange(50, 100)) sleep(0.5) for i in range(3): play(random.choice([C5,E5,G5])) sleep(1) ``` Sample slicing ``` from psonic import * number_of_pieces = 8 for i in range(16): s = random.randrange(0,number_of_pieces)/number_of_pieces #sample starts at 0.0 and finishes at 1.0 f = s + (1.0/number_of_pieces) sample(LOOP_AMEN,beat_stretch=2,start=s,finish=f) sleep(2.0/number_of_pieces) ``` An infinite loop and if ``` while True: if one_in(2): sample(DRUM_HEAVY_KICK) sleep(0.5) else: sample(DRUM_CYMBAL_CLOSED) sleep(0.25) ``` If you want to hear more than one sound at a time, use Threads. ``` import random from psonic import * from threading import Thread def bass_sound(): c = chord(E3, MAJOR7) while True: use_synth(PROPHET) play(random.choice(c), release=0.6) sleep(0.5) def snare_sound(): while True: sample(ELEC_SNARE) sleep(1) bass_thread = Thread(target=bass_sound) snare_thread = Thread(target=snare_sound) bass_thread.start() snare_thread.start() while True: pass ``` Every function *bass_sound* and *snare_sound* have its own thread. Your can hear them running. 
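One practical detail (my addition, not from the original docs): threads started this way keep the Python process alive because of their infinite loops. A hedged variant of the same two loops started as daemon threads, so the interpreter can exit cleanly:

```
from threading import Thread

# Daemon threads are killed automatically when the main program exits,
# so a stray `while True` loop cannot keep the process alive.
bass_thread = Thread(target=bass_sound, daemon=True)
snare_thread = Thread(target=snare_sound, daemon=True)
bass_thread.start()
snare_thread.start()
```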
``` from psonic import * from threading import Thread, Condition from random import choice def random_riff(condition): use_synth(PROPHET) sc = scale(E3, MINOR) while True: s = random.choice([0.125,0.25,0.5]) with condition: condition.wait() #Wait for message for i in range(8): r = random.choice([0.125, 0.25, 1, 2]) n = random.choice(sc) co = random.randint(30,100) play(n, release = r, cutoff = co) sleep(s) def drums(condition): while True: with condition: condition.notifyAll() #Message to threads for i in range(16): r = random.randrange(1,10) sample(DRUM_BASS_HARD, rate=r) sleep(0.125) condition = Condition() random_riff_thread = Thread(name='consumer1', target=random_riff, args=(condition,)) drums_thread = Thread(name='producer', target=drums, args=(condition,)) random_riff_thread.start() drums_thread.start() input("Press Enter to continue...") ``` To synchronize the thread, so that they play a note at the same time, you can use *Condition*. One function sends a message with *condition.notifyAll* the other waits until the message comes *condition.wait*. More simple with decorator __@in_thread__ ``` from psonic import * from random import choice tick = Message() @in_thread def random_riff(): use_synth(PROPHET) sc = scale(E3, MINOR) while True: s = random.choice([0.125,0.25,0.5]) tick.sync() for i in range(8): r = random.choice([0.125, 0.25, 1, 2]) n = random.choice(sc) co = random.randint(30,100) play(n, release = r, cutoff = co) sleep(s) @in_thread def drums(): while True: tick.cue() for i in range(16): r = random.randrange(1,10) sample(DRUM_BASS_HARD, rate=r) sleep(0.125) random_riff() drums() input("Press Enter to continue...") from psonic import * tick = Message() @in_thread def metronom(): while True: tick.cue() sleep(1) @in_thread def instrument(): while True: tick.sync() sample(DRUM_HEAVY_KICK) metronom() instrument() while True: pass ``` Play a list of notes ``` from psonic import * play ([64, 67, 71], amp = 0.3) sleep(1) play ([E4, G4, B4]) sleep(1) ``` Play chords ``` play(chord(E4, MINOR)) sleep(1) play(chord(E4, MAJOR)) sleep(1) play(chord(E4, MINOR7)) sleep(1) play(chord(E4, DOM7)) sleep(1) ``` Play arpeggios ``` play_pattern( chord(E4, 'm7')) play_pattern_timed( chord(E4, 'm7'), 0.25) play_pattern_timed(chord(E4, 'dim'), [0.25, 0.5]) ``` Play scales ``` play_pattern_timed(scale(C3, MAJOR), 0.125, release = 0.1) play_pattern_timed(scale(C3, MAJOR, num_octaves = 2), 0.125, release = 0.1) play_pattern_timed(scale(C3, MAJOR_PENTATONIC, num_octaves = 2), 0.125, release = 0.1) ``` The function *scale* returns a list with all notes of a scale. So you can use list methodes or functions. For example to play arpeggios descending or shuffeld. ``` import random from psonic import * s = scale(C3, MAJOR) s s.reverse() play_pattern_timed(s, 0.125, release = 0.1) random.shuffle(s) play_pattern_timed(s, 0.125, release = 0.1) ``` ### Live Loop One of the best in SONIC PI is the _Live Loop_. While a loop is playing music you can change it and hear the change. Let's try it in Python, too. ``` from psonic import * from threading import Thread def my_loop(): play(60) sleep(1) def looper(): while True: my_loop() looper_thread = Thread(name='looper', target=looper) looper_thread.start() input("Press Enter to continue...") ``` Now change the function *my_loop* und you can hear it. 
``` def my_loop(): use_synth(TB303) play (60, release= 0.3) sleep (0.25) def my_loop(): use_synth(TB303) play (chord(E3, MINOR), release= 0.3) sleep(0.5) def my_loop(): use_synth(TB303) sample(DRUM_BASS_HARD, rate = random.uniform(0.5, 2)) play(random.choice(chord(E3, MINOR)), release= 0.2, cutoff=random.randrange(60, 130)) sleep(0.25) ``` To stop the sound you have to end the kernel. In IPython with Kernel --> Restart Now with two live loops which are synch. ``` from psonic import * from threading import Thread, Condition from random import choice def loop_foo(): play (E4, release = 0.5) sleep (0.5) def loop_bar(): sample (DRUM_SNARE_SOFT) sleep (1) def live_loop_1(condition): while True: with condition: condition.notifyAll() #Message to threads loop_foo() def live_loop_2(condition): while True: with condition: condition.wait() #Wait for message loop_bar() condition = Condition() live_thread_1 = Thread(name='producer', target=live_loop_1, args=(condition,)) live_thread_2 = Thread(name='consumer1', target=live_loop_2, args=(condition,)) live_thread_1.start() live_thread_2.start() input("Press Enter to continue...") def loop_foo(): play (A4, release = 0.5) sleep (0.5) def loop_bar(): sample (DRUM_HEAVY_KICK) sleep (0.125) ``` If would be nice if we can stop the loop with a simple command. With stop event it works. ``` from psonic import * from threading import Thread, Condition, Event def loop_foo(): play (E4, release = 0.5) sleep (0.5) def loop_bar(): sample (DRUM_SNARE_SOFT) sleep (1) def live_loop_1(condition,stop_event): while not stop_event.is_set(): with condition: condition.notifyAll() #Message to threads loop_foo() def live_loop_2(condition,stop_event): while not stop_event.is_set(): with condition: condition.wait() #Wait for message loop_bar() condition = Condition() stop_event = Event() live_thread_1 = Thread(name='producer', target=live_loop_1, args=(condition,stop_event)) live_thread_2 = Thread(name='consumer1', target=live_loop_2, args=(condition,stop_event)) live_thread_1.start() live_thread_2.start() input("Press Enter to continue...") stop_event.set() ``` More complex live loops ``` sc = Ring(scale(E3, MINOR_PENTATONIC)) def loop_foo(): play (next(sc), release= 0.1) sleep (0.125) sc2 = Ring(scale(E3,MINOR_PENTATONIC,num_octaves=2)) def loop_bar(): use_synth(DSAW) play (next(sc2), release= 0.25) sleep (0.25) ``` Now a simple structure with four live loops ``` import random from psonic import * from threading import Thread, Condition, Event def live_1(): pass def live_2(): pass def live_3(): pass def live_4(): pass def live_loop_1(condition,stop_event): while not stop_event.is_set(): with condition: condition.notifyAll() #Message to threads live_1() def live_loop_2(condition,stop_event): while not stop_event.is_set(): with condition: condition.wait() #Wait for message live_2() def live_loop_3(condition,stop_event): while not stop_event.is_set(): with condition: condition.wait() #Wait for message live_3() def live_loop_4(condition,stop_event): while not stop_event.is_set(): with condition: condition.wait() #Wait for message live_4() condition = Condition() stop_event = Event() live_thread_1 = Thread(name='producer', target=live_loop_1, args=(condition,stop_event)) live_thread_2 = Thread(name='consumer1', target=live_loop_2, args=(condition,stop_event)) live_thread_3 = Thread(name='consumer2', target=live_loop_3, args=(condition,stop_event)) live_thread_4 = Thread(name='consumer3', target=live_loop_3, args=(condition,stop_event)) live_thread_1.start() live_thread_2.start() 
live_thread_3.start() live_thread_4.start() input("Press Enter to continue...") ``` After starting the loops you can change them ``` def live_1(): sample(BD_HAUS,amp=2) sleep(0.5) pass def live_2(): #sample(AMBI_CHOIR, rate=0.4) #sleep(1) pass def live_3(): use_synth(TB303) play(E2, release=4,cutoff=120,cutoff_attack=1) sleep(4) def live_4(): notes = scale(E3, MINOR_PENTATONIC, num_octaves=2) for i in range(8): play(random.choice(notes),release=0.1,amp=1.5) sleep(0.125) ``` And stop. ``` stop_event.set() ``` ### Creating Sound ``` from psonic import * synth(SINE, note=D4) synth(SQUARE, note=D4) synth(TRI, note=D4, amp=0.4) detune = 0.7 synth(SQUARE, note = E4) synth(SQUARE, note = E4+detune) detune=0.1 # Amplitude shaping synth(SQUARE, note = E2, release = 2) synth(SQUARE, note = E2+detune, amp = 2, release = 2) synth(GNOISE, release = 2, amp = 1, cutoff = 60) synth(GNOISE, release = 0.5, amp = 1, cutoff = 100) synth(NOISE, release = 0.2, amp = 1, cutoff = 90) ``` ### Next Step Using FX *Not implemented yet* ``` from psonic import * with Fx(SLICER): synth(PROPHET,note=E2,release=8,cutoff=80) synth(PROPHET,note=E2+4,release=8,cutoff=80) with Fx(SLICER, phase=0.125, probability=0.6,prob_pos=1): synth(TB303, note=E2, cutoff_attack=8, release=8) synth(TB303, note=E3, cutoff_attack=4, release=8) synth(TB303, note=E4, cutoff_attack=2, release=8) ``` ## OSC Communication (Sonic Pi Ver. 3.x or better) In Sonic Pi version 3 or better you can work with messages. ``` from psonic import * ``` First you need a programm in the Sonic Pi server that receives messages. You can write it in th GUI or send one with Python. ``` run("""live_loop :foo do use_real_time a, b, c = sync "/osc/trigger/prophet" synth :prophet, note: a, cutoff: b, sustain: c end """) ``` Now send a message to Sonic Pi. ``` send_message('/trigger/prophet', 70, 100, 8) stop() ``` ## More Examples ``` from psonic import * #Inspired by Steve Reich Clapping Music clapping = [1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0] for i in range(13): for j in range(4): for k in range(12): if clapping[k] ==1 : sample(DRUM_SNARE_SOFT,pan=-0.5) if clapping[(i+k)%12] == 1: sample(DRUM_HEAVY_KICK,pan=0.5) sleep (0.25) ``` ## Projects that use Python-Sonic Raspberry Pi sonic-track.py a Sonic-pi Motion Track Demo https://github.com/pageauc/sonic-track ## Sources Joe Armstrong: Connecting Erlang to the Sonic Pi http://joearms.github.io/2015/01/05/Connecting-Erlang-to-Sonic-Pi.html Joe Armstrong: Controlling Sound with OSC Messages http://joearms.github.io/2016/01/29/Controlling-Sound-with-OSC-Messages.html ..
github_jupyter
<table class="ee-notebook-buttons" align="left"> <td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Image/image_displacement.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td> <td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Image/image_displacement.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td> <td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Image/image_displacement.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td> <td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Image/image_displacement.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td> </table> ## Install Earth Engine API and geemap Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`. The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet. **Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving). ``` # Installs geemap package import subprocess try: import geemap except ImportError: print('geemap package not installed. Installing ...') subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap']) # Checks whether this notebook is running on Google Colab try: import google.colab import geemap.eefolium as emap except: import geemap as emap # Authenticates and initializes Earth Engine import ee try: ee.Initialize() except Exception as e: ee.Authenticate() ee.Initialize() ``` ## Create an interactive map The default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py#L13) can be added using the `Map.add_basemap()` function. 
``` Map = emap.Map(center=[40,-100], zoom=4) Map.add_basemap('ROADMAP') # Add Google Map Map ``` ## Add Earth Engine Python script ``` # Add Earth Engine dataset import math # Load the two images to be registered. image1 = ee.Image('SKYSAT/GEN-A/PUBLIC/ORTHO/MULTISPECTRAL/s01_20150502T082736Z') image2 = ee.Image('SKYSAT/GEN-A/PUBLIC/ORTHO/MULTISPECTRAL/s01_20150305T081019Z') # Use bicubic resampling during registration. image1Orig = image1.resample('bicubic') image2Orig = image2.resample('bicubic') # Choose to register using only the 'R' band. image1RedBand = image1Orig.select('R') image2RedBand = image2Orig.select('R') # Determine the displacement by matching only the 'R' bands. displacement = image2RedBand.displacement(**{ 'referenceImage': image1RedBand, 'maxOffset': 50.0, 'patchWidth': 100.0 }) # Compute image offset and direction. offset = displacement.select('dx').hypot(displacement.select('dy')) angle = displacement.select('dx').atan2(displacement.select('dy')) # Display offset distance and angle. Map.addLayer(offset, {'min':0, 'max': 20}, 'offset') Map.addLayer(angle, {'min': -math.pi, 'max': math.pi}, 'angle') Map.setCenter(37.44,0.58, 15) # Use the computed displacement to register all original bands. registered = image2Orig.displace(displacement) # Show the results of co-registering the images. visParams = {'bands': ['R', 'G', 'B'], 'max': 4000} Map.addLayer(image1Orig, visParams, 'Reference') Map.addLayer(image2Orig, visParams, 'Before Registration') Map.addLayer(registered, visParams, 'After Registration') alsoRegistered = image2Orig.register(**{ 'referenceImage': image1Orig, 'maxOffset': 50.0, 'patchWidth': 100.0 }) Map.addLayer(alsoRegistered, visParams, 'Also Registered') ``` ## Display Earth Engine data layers ``` Map.addLayerControl() # This line is not needed for ipyleaflet-based Map. Map ```
github_jupyter
A [Loss Function](https://www.bualabs.com/archives/2673/what-is-loss-function-cost-function-error-function-loss-function-how-cost-function-work-machine-learning-ep-1/), or Cost Function, computes the error: how far the yhat predicted by the model is from the true y, averaged over the data. It is then used to obtain the gradient of the loss with respect to the weights via Backpropagation, and the [Gradient Descent](https://www.bualabs.com/archives/631/what-is-gradient-descent-in-deep-learning-what-is-stochastic-gradient-descent-sgd-optimization-ep-1/) algorithm reduces the loss in the next training round. In this case we cover the most popular loss function for classification tasks (discrete, non-continuous values): Cross Entropy Loss. * yhat is the probability output by a model whose last layer is a [Softmax Function](https://www.bualabs.com/archives/1819/what-is-softmax-function-how-to-use-softmax-function-benefit-of-softmax/) * y is data in [One Hot Encoding](https://www.bualabs.com/archives/1902/what-is-one-hot-encoding-benefit-one-hot-encoding-why-one-hot-encoding-in-machine-learning/) format # 0. Import ``` import torch from torch import tensor import matplotlib.pyplot as plt ``` # 1. Data We create some sample data: Dog = 0, Cat = 1, Rat = 2 ## y Suppose the true y values from the sample data, the ones we actually want, are as follows ``` y = tensor([0, 1, 2, 0, 0, 1, 0, 2, 2, 1]) y n, c = len(y), y.max()+1 y_onehot = torch.zeros(n, c) y_onehot[torch.arange(n), y] = 1 y_onehot ``` ## yhat Suppose our model's predictions come out as follows ``` yhat = tensor([[3., 2., 1.], [5., 6., 2.], [0., 0., 5.], [2., 3., 1.], [5., 4., 3.], [1., 0., 3.], [5., 3., 2.], [2., 2., 4.], [8., 5., 3.], [3., 4., 0.]]) ``` We use the [Softmax Function from the previous ep](https://www.bualabs.com/archives/1819/what-is-softmax-function-how-to-use-softmax-function-benefit-of-softmax/) and add a log on top, which we will need in the next step $$\hbox{softmax(x)}_{i} = \frac{e^{x_{i}}}{\sum_{0 \leq j \leq n-1} e^{x_{j}}}$$ ``` def log_softmax(z): z = z - z.max(-1, keepdim=True)[0] exp_z = torch.exp(z) sum_exp_z = torch.sum(exp_z, -1, keepdim=True) return (exp_z / sum_exp_z).log() ``` yhat becomes probabilities over the 3 categories ``` log_softmax(yhat) ``` ## argmax Compare y and yhat. argmax finds the position of the largest value; here we are interested in the largest value along dimension 1 ``` yhat.argmax(1) y ``` 7 of them match ``` (yhat.argmax(1) == y).sum() ``` # 2. 
Cross Entropy Loss Cross Entropy Loss (Logistic Regression), or Log Loss, computes how much yhat differs from y using probabilities: a correct but unconfident prediction gives a large loss, and a wrong but very confident prediction also gives a large loss. It is computed over the whole batch and then averaged. * p(x) lies between 0 and 1 (so its log is negative; with the minus sign in front it becomes positive) * Cross Entropy Loss lies between 0 and infinity (0 means no error at all) ## 2.1 Formula The Cross Entropy Loss is called the Negative Log Likelihood $$ NLL = -\sum x\, \log p(x) $$ Since $x$ is in One Hot Encoding form, we can rewrite this as $-\log(p_{i})$, where i is the index of the y we want. ## 2.2 Negative Log Likelihood code ``` # log_probs = log of probability, target = target def nll(log_probs, target): return -(log_probs[torch.arange(log_probs.size()[0]), target]).mean() ``` ## 2.3 Using Negative Log Likelihood ``` loss = nll(log_softmax(yhat), y) loss ``` ## 2.4 Optimize Since $$\log \left ( \frac{a}{b} \right ) = \log(a) - \log(b)$$ we can split the numerator and the denominator into two terms before subtracting them. Also, if x is too large, exponentiating it can produce nan, so we use the formula below $$\log \left ( \sum_{j=1}^{n} e^{x_{j}} \right ) = \log \left ( e^{a} \sum_{j=1}^{n} e^{x_{j}-a} \right ) = a + \log \left ( \sum_{j=1}^{n} e^{x_{j}-a} \right )$$ Here a is max(x): computing exp(x-a) makes every exponent non-positive, so nothing exceeds 1 after the exp, and a is added back afterwards. Using the two formulas above, we can optimize the code as follows ``` def log_softmax2(z): m = z.max(-1, keepdim=True)[0] return z - ((z-m).exp().sum(-1, keepdim=True).log()+m) ``` Or ``` def log_softmax3(z): return z - (z).logsumexp(-1, keepdim=True) ``` ### Compare the results with PyTorch ``` import torch.nn.functional as F F.cross_entropy(yhat, y) nll(log_softmax(yhat), y) nll(log_softmax2(yhat), y) nll(log_softmax3(yhat), y) ``` The results are correct and match PyTorch's F.cross_entropy ## 2.5 Plotting We assume Dog = 0, Cat = 1 and that the sample data contains only Dog (0). We will plot the loss as the predicted probability sweeps from 0-100%. We create sample data with y equal to 0 for 100 items, representing 100 Dog images, to use for plotting. ``` y = torch.zeros(100) y[:10] ``` yhat is the model output: the probability of a Dog image (Column 0) and of a Cat image (Column 1). We sweep the data from (Dog 0%, Cat 100%) to (Dog 100%, Cat 0%). ``` yhat = torch.zeros(100, 2) yhat[range(0, 100), 0] = torch.arange(0., 1., 0.01) yhat[:, 1] = 1-yhat[:, 0] yhat[:10] ``` Compute the probability values of both classes, for plotting. ``` classes = torch.tensor([0., 1.]) yhat_classes = yhat @ classes.t() yhat_classes[:10] ``` Take the log of the probabilities (in a real model these would come from Softmax, as in the example above), ready to plug into the formula. ``` log_probs = yhat.log() log_probs[:10] ``` Negative Log Likelihood ``` loss = -(log_probs[torch.arange(log_probs.size()[0]), y.long()]) loss[:10] ``` ### Plot y, yhat, loss and log loss * The sample y, assumed to be 0 only (red line), compared with yhat predicting from 1 down to 0 (from wrong to correct, green line) * Observe the orange Loss: at the far left the ground truth is 0 (red line) but the model predicts 1 (green line) with 100% confidence, so the loss shoots up towards infinity * Moving towards the middle, the loss drops rapidly while the model is still wrong but no longer fully confident * On the right, the loss keeps decreasing to 0 as the model correctly predicts 0 with 100% confidence * Log of Loss converts the loss, which ranges from infinity to 0, onto a log scale ranging from infinity to -infinity, which is more balanced and easier to read ``` fig,ax = plt.subplots(figsize=(9, 9)) ax.scatter(yhat[:,0].numpy(), 
loss.log(), label="Log of Loss") ax.scatter(yhat[:,0].numpy(), loss, label="Loss") ax.plot(yhat[:,0].numpy(), yhat_classes.numpy(), label="yhat", color='green') ax.plot(yhat[:,0].numpy(), y.numpy(), label="y", color='red') ax.grid(True) ax.legend(loc='upper right') ``` # 3. Other Loss Functions We need to understand the background and the inner workings of loss functions, because when we want to design a model for more complex problems we also have to design a loss function that fits that task. For example, we may take several loss functions, such as a [Regression Loss](https://www.bualabs.com/archives/1928/what-is-mean-absolute-error-mae-mean-squared-error-mse-root-mean-squared-error-rmse-loss-function-ep-2/), mix them together and weight them into the combined loss we want; a small sketch of this idea follows below. ``` ```
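As a rough illustration of that last idea, here is a minimal sketch (not part of the original article) of a weighted combination of two loss functions in PyTorch; the 0.7/0.3 weights and the extra regression head are hypothetical.

```
import torch
import torch.nn.functional as F

def combined_loss(class_logits, class_target, reg_pred, reg_target,
                  w_cls=0.7, w_reg=0.3):
    # Hypothetical weights: weighted sum of a classification loss and a regression loss.
    return (w_cls * F.cross_entropy(class_logits, class_target)
            + w_reg * F.mse_loss(reg_pred, reg_target))

# Toy batch of 4 examples with 3 classes and one regression target each.
class_logits = torch.randn(4, 3)
class_target = torch.tensor([0, 2, 1, 0])
reg_pred = torch.randn(4, 1)
reg_target = torch.randn(4, 1)
combined_loss(class_logits, class_target, reg_pred, reg_target)
```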
github_jupyter
# Example: CanvasXpress circular Chart No. 6 This example page demonstrates how to, using the Python package, create a chart that matches the CanvasXpress online example located at: https://www.canvasxpress.org/examples/circular-6.html This example is generated using the reproducible JSON obtained from the above page and the `canvasxpress.util.generator.generate_canvasxpress_code_from_json_file()` function. Everything required for the chart to render is included in the code below. Simply run the code block. ``` from canvasxpress.canvas import CanvasXpress from canvasxpress.js.collection import CXEvents from canvasxpress.render.jupyter import CXNoteBook cx = CanvasXpress( render_to="circular6", data={ "z": { "chr": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18, 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 22, 22, 22, "X", "X", "X", "X", "X", "X", "X", "X", "Y", "Y", "Y", "X", 3, 19, 5, 11, 10, 15, 18, 11, 13, 14, 21, 1, 7, 14, "Y", 21, 2, 18, 1, 7, 9, 7, 19, 19, 20, 20, 9, "X", 16, 8, 20, "X", 18 ], "pos": [ 176158308, 195792629, 229516707, 127588847, 79643728, 185593801, 9679485, 244523632, 236753568, 128133434, 228644565, 150003054, 219011541, 168916847, 26949439, 102746811, 2474221, 209897353, 113021141, 77762431, 163020942, 171034774, 213334477, 97455775, 83291531, 143519956, 122953780, 134434993, 6501153, 36509633, 134712403, 16094381, 159112661, 16092021, 29530674, 98680615, 19640420, 108401923, 143243174, 16342895, 42326293, 115086153, 86673182, 138017594, 40287060, 133573077, 138457582, 17843222, 54643446, 31433785, 74774102, 178335068, 56846964, 539920, 95028169, 121007542, 131105053, 79720263, 48227800, 142747889, 62543189, 50598801, 33328141, 158733438, 47107967, 5246518, 131713113, 12326167, 58372056, 28321194, 108652542, 103359699, 103536939, 56208609, 87012547, 3341929, 124836752, 59833292, 39064309, 31063538, 67409926, 10777547, 48520782, 18875793, 81484304, 35095469, 120807273, 36875340, 126128712, 100677585, 118570992, 9612077, 77867215, 19151335, 53602699, 49087920, 38708284, 113120818, 101439886, 75343477, 26249259, 54093637, 20596380, 98938748, 40533585, 89574094, 80301557, 56696139, 106845694, 10555451, 101114606, 50732192, 17458821, 9173140, 86898750, 76472186, 16266789, 93249681, 87911171, 9404454, 56147990, 54904212, 87210495, 20386568, 32880981, 14002843, 12161519, 39664472, 73880383, 47714897, 868308, 66004051, 24127310, 54211025, 15902150, 8721825, 46962668, 39093389, 55603291, 41233282, 63103970, 10443615, 6248945, 24491648, 19429871, 4170936, 40286870, 54989240, 39471767, 44811148, 28873711, 7738820, 20461957, 23030024, 7678949, 113205707, 9117671, 55230156, 73702110, 105064343, 7484814, 6345698, 45891043, 14020510, 2971362, 29256349, 146673737, 141355615, 31567310, 3395353, 2464888, 68581686, 60314299, 58307384, 26528612, 38283186, 43483316, 41830860, 160212793, 83692467, 39167742, 8309133, 26927848, 197477698, 65796042, 145369367, 91838386, 59033170, 31843957, 33440512, 47490053, 49915055, 1878719, 93047574, 145870982, 75626142, 35819134, 60862499, 18121170, 49128537 ], "Annt1": [ "Desc:2", "Desc:2", "Desc:1", "Desc:1", "Desc:1", "Desc:1", 
"Desc:2", "Desc:2", "Desc:2", "Desc:1", "Desc:1", "Desc:1", "Desc:2", "Desc:1", "Desc:1", "Desc:1", "Desc:1", "Desc:1", "Desc:1", "Desc:2", "Desc:1", "Desc:1", "Desc:1", "Desc:1", "Desc:2", "Desc:2", "Desc:2", "Desc:1", "Desc:2", "Desc:1", "Desc:2", "Desc:2", "Desc:2", "Desc:1", "Desc:2", "Desc:1", "Desc:1", "Desc:2", "Desc:2", "Desc:1", "Desc:2", "Desc:2", "Desc:2", "Desc:2", "Desc:2", "Desc:1", "Desc:1", "Desc:1", "Desc:1", "Desc:2", "Desc:1", "Desc:2", "Desc:2", "Desc:1", "Desc:2", "Desc:1", "Desc:1", "Desc:1", "Desc:1", "Desc:1", "Desc:2", "Desc:2", "Desc:2", "Desc:2", "Desc:1", "Desc:2", "Desc:2", "Desc:2", "Desc:2", "Desc:2", "Desc:2", "Desc:2", "Desc:1", "Desc:1", "Desc:1", "Desc:2", "Desc:2", "Desc:1", "Desc:1", "Desc:2", "Desc:1", "Desc:2", "Desc:1", "Desc:2", "Desc:2", "Desc:1", "Desc:2", "Desc:2", "Desc:2", "Desc:1", "Desc:1", "Desc:2", "Desc:2", "Desc:1", "Desc:1", "Desc:1", "Desc:1", "Desc:2", "Desc:1", "Desc:2", "Desc:2", "Desc:2", "Desc:2", "Desc:1", "Desc:1", "Desc:2", "Desc:1", "Desc:1", "Desc:1", "Desc:2", "Desc:1", "Desc:2", "Desc:2", "Desc:2", "Desc:2", "Desc:1", "Desc:2", "Desc:2", "Desc:1", "Desc:1", "Desc:1", "Desc:1", "Desc:2", "Desc:1", "Desc:2", "Desc:1", "Desc:2", "Desc:1", "Desc:2", "Desc:1", "Desc:2", "Desc:1", "Desc:2", "Desc:2", "Desc:1", "Desc:1", "Desc:1", "Desc:1", "Desc:1", "Desc:1", "Desc:1", "Desc:1", "Desc:1", "Desc:2", "Desc:2", "Desc:1", "Desc:2", "Desc:1", "Desc:2", "Desc:2", "Desc:2", "Desc:2", "Desc:2", "Desc:2", "Desc:1", "Desc:2", "Desc:1", "Desc:2", "Desc:1", "Desc:1", "Desc:1", "Desc:1", "Desc:2", "Desc:2", "Desc:2", "Desc:1", "Desc:1", "Desc:2", "Desc:2", "Desc:2", "Desc:1", "Desc:1", "Desc:2", "Desc:2", "Desc:1", "Desc:1", "Desc:2", "Desc:2", "Desc:2", "Desc:2", "Desc:2", "Desc:2", "Desc:2", "Desc:1", "Desc:1", "Desc:2", "Desc:2", "Desc:2", "Desc:1", "Desc:1", "Desc:1", "Desc:1", "Desc:1", "Desc:1", "Desc:2", "Desc:2", "Desc:2", "Desc:1", "Desc:1", "Desc:2" ], "Annt2": [ "Desc:3", "Desc:2", "Desc:2", "Desc:2", "Desc:2", "Desc:3", "Desc:2", "Desc:3", "Desc:3", "Desc:3", "Desc:1", "Desc:3", "Desc:2", "Desc:2", "Desc:3", "Desc:1", "Desc:2", "Desc:2", "Desc:3", "Desc:3", "Desc:2", "Desc:2", "Desc:1", "Desc:1", "Desc:2", "Desc:1", "Desc:3", "Desc:1", "Desc:1", "Desc:3", "Desc:2", "Desc:1", "Desc:2", "Desc:1", "Desc:3", "Desc:2", "Desc:2", "Desc:2", "Desc:2", "Desc:3", "Desc:1", "Desc:1", "Desc:3", "Desc:1", "Desc:1", "Desc:3", "Desc:3", "Desc:2", "Desc:3", "Desc:2", "Desc:2", "Desc:1", "Desc:3", "Desc:2", "Desc:2", "Desc:1", "Desc:1", "Desc:1", "Desc:2", "Desc:2", "Desc:2", "Desc:2", "Desc:1", "Desc:2", "Desc:1", "Desc:2", "Desc:3", "Desc:2", "Desc:2", "Desc:3", "Desc:1", "Desc:3", "Desc:2", "Desc:3", "Desc:2", "Desc:1", "Desc:1", "Desc:3", "Desc:3", "Desc:2", "Desc:1", "Desc:3", "Desc:3", "Desc:2", "Desc:2", "Desc:3", "Desc:3", "Desc:2", "Desc:3", "Desc:1", "Desc:3", "Desc:1", "Desc:3", "Desc:1", "Desc:2", "Desc:1", "Desc:1", "Desc:3", "Desc:3", "Desc:1", "Desc:3", "Desc:1", "Desc:3", "Desc:3", "Desc:3", "Desc:1", "Desc:1", "Desc:1", "Desc:2", "Desc:2", "Desc:2", "Desc:1", "Desc:2", "Desc:3", "Desc:2", "Desc:2", "Desc:3", "Desc:3", "Desc:1", "Desc:1", "Desc:1", "Desc:1", "Desc:2", "Desc:1", "Desc:2", "Desc:1", "Desc:1", "Desc:3", "Desc:2", "Desc:3", "Desc:1", "Desc:3", "Desc:2", "Desc:2", "Desc:2", "Desc:3", "Desc:3", "Desc:2", "Desc:1", "Desc:3", "Desc:2", "Desc:3", "Desc:1", "Desc:1", "Desc:2", "Desc:1", "Desc:2", "Desc:3", "Desc:2", "Desc:2", "Desc:1", "Desc:3", "Desc:1", "Desc:2", "Desc:3", "Desc:1", "Desc:3", "Desc:2", "Desc:3", "Desc:2", 
"Desc:2", "Desc:1", "Desc:3", "Desc:2", "Desc:2", "Desc:1", "Desc:3", "Desc:3", "Desc:3", "Desc:2", "Desc:3", "Desc:2", "Desc:1", "Desc:2", "Desc:1", "Desc:3", "Desc:1", "Desc:2", "Desc:3", "Desc:1", "Desc:2", "Desc:1", "Desc:1", "Desc:2", "Desc:2", "Desc:1", "Desc:2", "Desc:2", "Desc:1", "Desc:3", "Desc:2", "Desc:1", "Desc:2", "Desc:2", "Desc:1", "Desc:3", "Desc:2", "Desc:1", "Desc:1", "Desc:1" ], "Annt3": [ "Desc:4", "Desc:2", "Desc:4", "Desc:2", "Desc:4", "Desc:4", "Desc:4", "Desc:2", "Desc:1", "Desc:3", "Desc:4", "Desc:2", "Desc:4", "Desc:3", "Desc:4", "Desc:3", "Desc:4", "Desc:4", "Desc:1", "Desc:2", "Desc:2", "Desc:1", "Desc:1", "Desc:4", "Desc:1", "Desc:3", "Desc:1", "Desc:4", "Desc:4", "Desc:1", "Desc:1", "Desc:2", "Desc:3", "Desc:1", "Desc:1", "Desc:4", "Desc:3", "Desc:4", "Desc:3", "Desc:1", "Desc:3", "Desc:2", "Desc:3", "Desc:1", "Desc:1", "Desc:1", "Desc:3", "Desc:3", "Desc:4", "Desc:2", "Desc:4", "Desc:4", "Desc:4", "Desc:1", "Desc:1", "Desc:3", "Desc:2", "Desc:4", "Desc:2", "Desc:4", "Desc:4", "Desc:2", "Desc:1", "Desc:4", "Desc:2", "Desc:2", "Desc:4", "Desc:3", "Desc:2", "Desc:3", "Desc:2", "Desc:1", "Desc:2", "Desc:1", "Desc:1", "Desc:1", "Desc:4", "Desc:4", "Desc:2", "Desc:4", "Desc:2", "Desc:2", "Desc:2", "Desc:2", "Desc:4", "Desc:1", "Desc:1", "Desc:4", "Desc:4", "Desc:1", "Desc:1", "Desc:1", "Desc:1", "Desc:2", "Desc:3", "Desc:1", "Desc:3", "Desc:3", "Desc:3", "Desc:2", "Desc:1", "Desc:1", "Desc:3", "Desc:4", "Desc:4", "Desc:3", "Desc:3", "Desc:1", "Desc:2", "Desc:1", "Desc:4", "Desc:2", "Desc:4", "Desc:3", "Desc:2", "Desc:1", "Desc:4", "Desc:2", "Desc:2", "Desc:1", "Desc:1", "Desc:3", "Desc:3", "Desc:4", "Desc:4", "Desc:3", "Desc:3", "Desc:1", "Desc:4", "Desc:3", "Desc:4", "Desc:2", "Desc:3", "Desc:4", "Desc:3", "Desc:4", "Desc:4", "Desc:4", "Desc:3", "Desc:3", "Desc:4", "Desc:1", "Desc:2", "Desc:1", "Desc:3", "Desc:2", "Desc:4", "Desc:3", "Desc:4", "Desc:2", "Desc:1", "Desc:3", "Desc:2", "Desc:3", "Desc:1", "Desc:4", "Desc:2", "Desc:1", "Desc:1", "Desc:2", "Desc:4", "Desc:3", "Desc:4", "Desc:4", "Desc:4", "Desc:1", "Desc:1", "Desc:3", "Desc:4", "Desc:1", "Desc:4", "Desc:1", "Desc:3", "Desc:3", "Desc:2", "Desc:1", "Desc:4", "Desc:2", "Desc:3", "Desc:2", "Desc:1", "Desc:2", "Desc:4", "Desc:1", "Desc:4", "Desc:3", "Desc:4", "Desc:3", "Desc:2", "Desc:2", "Desc:4", "Desc:2", "Desc:3", "Desc:4", "Desc:1", "Desc:1", "Desc:1", "Desc:4", "Desc:4", "Desc:1" ], "Annt4": [ "Desc:4", "Desc:2", "Desc:3", "Desc:4", "Desc:4", "Desc:5", "Desc:1", "Desc:1", "Desc:1", "Desc:2", "Desc:3", "Desc:1", "Desc:2", "Desc:2", "Desc:3", "Desc:4", "Desc:3", "Desc:3", "Desc:2", "Desc:1", "Desc:2", "Desc:3", "Desc:1", "Desc:5", "Desc:2", "Desc:4", "Desc:3", "Desc:2", "Desc:5", "Desc:3", "Desc:1", "Desc:4", "Desc:2", "Desc:4", "Desc:1", "Desc:3", "Desc:3", "Desc:5", "Desc:4", "Desc:1", "Desc:2", "Desc:4", "Desc:2", "Desc:3", "Desc:2", "Desc:5", "Desc:5", "Desc:4", "Desc:4", "Desc:5", "Desc:1", "Desc:4", "Desc:3", "Desc:5", "Desc:2", "Desc:4", "Desc:3", "Desc:1", "Desc:1", "Desc:3", "Desc:2", "Desc:2", "Desc:4", "Desc:2", "Desc:3", "Desc:1", "Desc:4", "Desc:2", "Desc:2", "Desc:3", "Desc:1", "Desc:5", "Desc:4", "Desc:5", "Desc:2", "Desc:2", "Desc:2", "Desc:3", "Desc:5", "Desc:1", "Desc:4", "Desc:2", "Desc:4", "Desc:2", "Desc:2", "Desc:2", "Desc:5", "Desc:1", "Desc:4", "Desc:5", "Desc:4", "Desc:1", "Desc:1", "Desc:1", "Desc:3", "Desc:5", "Desc:5", "Desc:1", "Desc:2", "Desc:2", "Desc:2", "Desc:5", "Desc:1", "Desc:2", "Desc:5", "Desc:2", "Desc:2", "Desc:5", "Desc:5", "Desc:3", "Desc:5", "Desc:4", 
"Desc:5", "Desc:4", "Desc:3", "Desc:3", "Desc:3", "Desc:4", "Desc:2", "Desc:2", "Desc:2", "Desc:5", "Desc:2", "Desc:5", "Desc:4", "Desc:4", "Desc:4", "Desc:3", "Desc:4", "Desc:4", "Desc:1", "Desc:2", "Desc:5", "Desc:5", "Desc:5", "Desc:4", "Desc:2", "Desc:2", "Desc:4", "Desc:4", "Desc:3", "Desc:5", "Desc:5", "Desc:2", "Desc:4", "Desc:2", "Desc:5", "Desc:5", "Desc:5", "Desc:4", "Desc:2", "Desc:2", "Desc:3", "Desc:5", "Desc:5", "Desc:5", "Desc:2", "Desc:3", "Desc:5", "Desc:1", "Desc:2", "Desc:1", "Desc:4", "Desc:4", "Desc:2", "Desc:2", "Desc:2", "Desc:2", "Desc:2", "Desc:1", "Desc:1", "Desc:2", "Desc:2", "Desc:1", "Desc:2", "Desc:4", "Desc:3", "Desc:5", "Desc:3", "Desc:5", "Desc:3", "Desc:1", "Desc:1", "Desc:3", "Desc:4", "Desc:2", "Desc:1", "Desc:5", "Desc:3", "Desc:4", "Desc:4", "Desc:5", "Desc:1", "Desc:2", "Desc:2", "Desc:3", "Desc:5", "Desc:3", "Desc:2", "Desc:2" ] }, "x": { "Factor1": [ "Lev:2", "Lev:1", "Lev:1", "Lev:1", "Lev:2", "Lev:1", "Lev:1", "Lev:1", "Lev:2", "Lev:1" ], "Factor2": [ "Lev:1", "Lev:2", "Lev:2", "Lev:1", "Lev:1", "Lev:3", "Lev:3", "Lev:1", "Lev:1", "Lev:2" ], "Factor3": [ "Lev:4", "Lev:1", "Lev:2", "Lev:1", "Lev:4", "Lev:4", "Lev:1", "Lev:4", "Lev:2", "Lev:2" ], "Factor4": [ "Lev:1", "Lev:4", "Lev:2", "Lev:4", "Lev:5", "Lev:2", "Lev:2", "Lev:1", "Lev:4", "Lev:3" ] }, "y": { "vars": [ "V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11", "V12", "V13", "V14", "V15", "V16", "V17", "V18", "V19", "V20", "V21", "V22", "V23", "V24", "V25", "V26", "V27", "V28", "V29", "V30", "V31", "V32", "V33", "V34", "V35", "V36", "V37", "V38", "V39", "V40", "V41", "V42", "V43", "V44", "V45", "V46", "V47", "V48", "V49", "V50", "V51", "V52", "V53", "V54", "V55", "V56", "V57", "V58", "V59", "V60", "V61", "V62", "V63", "V64", "V65", "V66", "V67", "V68", "V69", "V70", "V71", "V72", "V73", "V74", "V75", "V76", "V77", "V78", "V79", "V80", "V81", "V82", "V83", "V84", "V85", "V86", "V87", "V88", "V89", "V90", "V91", "V92", "V93", "V94", "V95", "V96", "V97", "V98", "V99", "V100", "V101", "V102", "V103", "V104", "V105", "V106", "V107", "V108", "V109", "V110", "V111", "V112", "V113", "V114", "V115", "V116", "V117", "V118", "V119", "V120", "V121", "V122", "V123", "V124", "V125", "V126", "V127", "V128", "V129", "V130", "V131", "V132", "V133", "V134", "V135", "V136", "V137", "V138", "V139", "V140", "V141", "V142", "V143", "V144", "V145", "V146", "V147", "V148", "V149", "V150", "V151", "V152", "V153", "V154", "V155", "V156", "V157", "V158", "V159", "V160", "V161", "V162", "V163", "V164", "V165", "V166", "V167", "V168", "V169", "V170", "V171", "V172", "V173", "V174", "V175", "V176", "V177", "V178", "V179", "V180", "V181", "V182", "V183", "V184", "V185", "V186", "V187", "V188", "V189", "V190", "V191", "V192", "V193", "V194", "V195", "V196", "V197", "V198", "V199", "V200" ], "smps": [ "S1", "S2", "S3", "S4", "S5", "S6", "S7", "S8", "S9", "S10" ], "data": [ [ 52.79, 24.71, 14.35, 22.23, 42.42, 12.38, 19.18, 19.6, 51.81, 20.2 ], [ 53.39, 28.1, 8.02, 24.12, 21.36, 28.89, 16.29, 27.44, 38.8, 18.3 ], [ 31.11, 13.84, 16.32, 7.62, 29.04, 2.66, 11.83, 8.6, 52.39, 8.55 ], [ 42.48, 6.54, 14.22, 6.19, 51.77, 6.26, 6.4, 4.32, 47.32, 3.13 ], [ 21.44, 8.39, 17.61, 1.59, 42.14, 6.91, 14.92, 5.04, 30.25, 20.55 ], [ 47.6, 25.45, 7.53, 6.22, 40.27, 10.96, 28.39, 19.22, 37.05, 11.5 ], [ 22.9, 23.06, 3.38, 25.36, 31.83, 1.15, 25.07, 10.77, 23.24, 24.39 ], [ 35.39, 3.88, 19.33, 20.16, 42.14, 21.13, 25.74, 24.42, 25.39, 2.43 ], [ 52.37, 6.73, 13.85, 11.98, 21.89, 4.12, 24.02, 19, 25.97, 23.68 ], [ 52.58, 23.67, 
7.66, 21.44, 47.43, 4.87, 22.18, 20.36, 52.05, 2.86 ], [ 31.75, 16.88, 21.22, 1.41, 45.79, 26.28, 17.74, 9.64, 22.42, 9.45 ], [ 24.37, 4.16, 24.06, 23.13, 41.6, 27.75, 13.84, 7.34, 45.81, 16.44 ], [ 46.31, 15.54, 6.64, 22.44, 52.65, 14.93, 5.5, 24.09, 22.05, 9.59 ], [ 35.7, 19.79, 2.5, 26.23, 34.06, 23.88, 16.45, 7.96, 24.18, 3.22 ], [ 35.47, 5.46, 8.68, 2.91, 22.42, 6.71, 1.83, 22.89, 28.1, 14.21 ], [ 52.58, 27.38, 27.51, 14.4, 47.27, 15.92, 13.59, 26.11, 33, 23.06 ], [ 38.08, 15.55, 28.65, 28.74, 47.06, 16.09, 20.78, 26.79, 43.36, 22.28 ], [ 53.38, 7.1, 26.57, 11.79, 40.73, 26.29, 26.21, 5.92, 53.16, 6.62 ], [ 29.9, 13.26, 7.23, 5.38, 53.67, 28.59, 25.78, 21.36, 52.78, 13.46 ], [ 52.6, 19.51, 14.72, 15.76, 52.13, 11.8, 1.05, 20.58, 29.03, 17.05 ], [ 32.48, 24.14, 21.4, 11.61, 32.99, 7.31, 10.01, 27.88, 43.2, 1.14 ], [ 30.62, 26.5, 26.38, 24.57, 21.88, 20.69, 13.17, 13.96, 41.89, 28.89 ], [ 43.32, 18.91, 11.68, 22.23, 26.85, 11.23, 25.33, 2.87, 46.3, 9.78 ], [ 22.35, 22.74, 27.62, 12.85, 36.96, 1.77, 26.33, 2.53, 27.88, 26.44 ], [ 52.04, 3.77, 14.24, 14.65, 28.39, 14.11, 25.47, 21.81, 42.57, 18.74 ], [ 47.25, 13.18, 26.74, 17.95, 53.01, 26.92, 1.61, 4.51, 30.27, 7.34 ], [ 25.46, 16.02, 25.93, 11.33, 53.77, 17.54, 1.39, 24.03, 23.83, 13.23 ], [ 22.81, 10.33, 24.77, 26.01, 28.95, 23.99, 12.01, 17.69, 35.64, 12.08 ], [ 49.27, 26.71, 6.64, 14.77, 51.25, 28.48, 1.55, 10.43, 31.01, 17.18 ], [ 42.41, 10.58, 15.27, 14.52, 40.17, 22.83, 12.05, 1.56, 22.99, 27.03 ], [ 45.11, 18.03, 2.03, 22.29, 47.98, 18.79, 1.27, 3.41, 30.1, 9.83 ], [ 52.17, 16.1, 19.34, 5.16, 42.09, 20.79, 20.52, 1.7, 29.79, 28.78 ], [ 42.62, 6.73, 9.88, 20.52, 32.33, 23.13, 27.96, 18.97, 47.05, 26.31 ], [ 34.15, 27.79, 1.84, 8.72, 23.68, 13.74, 8.7, 13.29, 45.97, 10.63 ], [ 53.41, 8.13, 27.7, 1.16, 33.81, 17.28, 17.65, 3.52, 26.6, 1.81 ], [ 46.08, 16.98, 2.9, 6.39, 48.8, 14.27, 1.16, 19.95, 26.05, 16.55 ], [ 47.68, 20.6, 26.02, 2.24, 30.25, 5.2, 27.69, 27.32, 25.77, 1.83 ], [ 43.9, 17.09, 3.2, 27.51, 21.37, 25.93, 26.64, 20.24, 33.7, 17.47 ], [ 48.23, 13.08, 5.52, 7.6, 49.08, 28.7, 8.77, 13.11, 32.41, 2.72 ], [ 33.18, 9.4, 10.42, 21.24, 44.66, 25.64, 12.85, 7.75, 21.55, 28.84 ], [ 42.38, 23.57, 21.18, 25.08, 43.04, 11.07, 14.21, 1.42, 32.97, 21.7 ], [ 30.55, 3.97, 4.38, 28.78, 39.17, 12.88, 4.53, 18.51, 48.28, 7.76 ], [ 41.59, 14.67, 21.58, 15.97, 32.76, 6.12, 26.85, 15.79, 41.7, 7.31 ], [ 46, 15.58, 27.91, 1.88, 31.55, 28.62, 14.72, 15.09, 52.69, 8.05 ], [ 52.78, 22.19, 15.16, 1.41, 45.68, 9.69, 12.1, 7.3, 21.85, 3.27 ], [ 47.42, 27.04, 15.18, 26.67, 23.72, 24.41, 28.73, 22.77, 22.13, 8.03 ], [ 25.76, 1.63, 11.07, 19.24, 29.78, 9.65, 21.95, 13.94, 48.78, 7.68 ], [ 48.6, 27.58, 20.39, 19.72, 35.11, 28.69, 23.7, 1.95, 33.49, 27.96 ], [ 41.85, 15.79, 7.88, 16.83, 52.66, 16.14, 5.35, 18.82, 27.15, 5.45 ], [ 45.62, 21.21, 11.66, 5.16, 22.28, 11.81, 16.28, 15.32, 33.85, 22.43 ], [ 46.29, 25.75, 19.88, 20.76, 22.11, 21.46, 7.11, 26.73, 44.82, 3.51 ], [ 24.36, 8.99, 9.74, 27.87, 32.85, 27.9, 18.32, 14.07, 37.25, 17.46 ], [ 36.43, 20.25, 1.86, 10.53, 21.23, 4.09, 19.13, 18.67, 36.86, 19.04 ], [ 25.61, 11.06, 9.71, 17.51, 32.42, 5.87, 28.71, 7.12, 44.66, 3.21 ], [ 28.35, 16.72, 6.6, 21.55, 39.72, 5.16, 9.52, 6.6, 41.89, 4.98 ], [ 31.16, 24.99, 5.19, 8.29, 32.85, 5.62, 21.49, 16.94, 48.36, 26.06 ], [ 28.1, 14.02, 18.97, 24.52, 48.45, 18.57, 11.57, 26.8, 23.16, 7.75 ], [ 34.22, 18.8, 4.05, 5.68, 22.38, 6.06, 19, 9.99, 38.28, 12.62 ], [ 50, 5.17, 13.58, 27.32, 48.93, 12.52, 12.53, 3.9, 27.92, 13.57 ], [ 51.8, 14.39, 27.67, 17.76, 49.12, 
18.48, 5.37, 21.47, 26.36, 5.42 ], [ 21.06, 27.43, 4.66, 4.66, 43.69, 23.29, 10.97, 24.48, 39.68, 24.51 ], [ 38.43, 26.14, 9.59, 21.03, 21.09, 20.03, 19.43, 26.53, 35.09, 19.22 ], [ 52.81, 25.31, 18.03, 8.39, 47.76, 23.69, 26.31, 26.5, 37.19, 25.91 ], [ 25.14, 7.08, 17.93, 13.86, 21.06, 23.32, 27.77, 1.05, 51.25, 19.22 ], [ 48.36, 8.08, 18.26, 16.15, 28.64, 13.73, 13.87, 16.39, 42.95, 1.75 ], [ 37.43, 11.5, 23.1, 10.51, 48.75, 5.03, 28.38, 18.39, 27.03, 17.35 ], [ 22.67, 19.74, 20.84, 15.24, 40.62, 18.13, 5.79, 8.72, 45.6, 27.13 ], [ 39.33, 19.08, 25.75, 14.02, 38.13, 13.18, 25.47, 2.38, 33.72, 3.71 ], [ 32.02, 13.58, 25.5, 3.88, 22.28, 5.56, 8.13, 18.99, 32.71, 4.26 ], [ 26.62, 3.28, 25.59, 18.33, 27.65, 15.9, 20.44, 28.41, 46.91, 6.13 ], [ 51.91, 26.1, 2.84, 28.74, 31.25, 23.36, 12.53, 15.14, 51.49, 10.04 ], [ 48.71, 21.97, 15.89, 28.65, 49.3, 21.22, 3.76, 20.03, 42.07, 18.88 ], [ 23.13, 2.08, 10.52, 21.58, 48.12, 17.61, 4.93, 15.71, 26.94, 28.32 ], [ 25.16, 27.29, 27.77, 21.57, 53.14, 19.33, 6.46, 15.55, 38.21, 22.02 ], [ 27.01, 18.7, 18.35, 25.85, 34.58, 16.19, 13.52, 21.68, 33.73, 7.28 ], [ 40.99, 15.97, 19.43, 22.44, 46.51, 27.81, 11.62, 2.95, 44.24, 27.83 ], [ 27.63, 20.4, 23.63, 18.05, 39.83, 27.58, 26.87, 8.77, 34.69, 3.6 ], [ 27.43, 2.53, 1.74, 26.48, 22.16, 14.38, 7.54, 11.17, 43.99, 15.86 ], [ 37.72, 24.1, 13.48, 1.62, 31.68, 24.96, 23.16, 12.29, 25.18, 16.59 ], [ 46.47, 23.57, 6.71, 11.72, 53.77, 7.37, 1.13, 20.3, 22.93, 6.53 ], [ 49.41, 28.3, 16.59, 15.22, 27.74, 6.38, 3.01, 20.2, 38.05, 2.12 ], [ 52.69, 14.29, 4.48, 5.06, 38.24, 20.31, 13.41, 10.79, 35.45, 9.82 ], [ 48.98, 21.97, 22.63, 3.21, 46.84, 28.64, 5.27, 15.32, 23.21, 17.51 ], [ 25.76, 3.48, 16.51, 15.99, 40.09, 17.21, 22.1, 24.21, 22.85, 26.39 ], [ 29.57, 22.65, 14.76, 4.48, 47.37, 12.4, 21.85, 12.72, 25.18, 11.03 ], [ 22.01, 6.11, 22.28, 15.93, 46.41, 6.62, 21.88, 8.61, 23.99, 15.67 ], [ 23.29, 24.59, 2.47, 21.52, 23.92, 11.13, 14.74, 17.02, 33.5, 22.62 ], [ 30.62, 18.08, 2.31, 19.1, 45.56, 27.75, 3.24, 9.69, 42.93, 4.19 ], [ 24.13, 1.35, 11.88, 25.51, 48.22, 1.37, 28.94, 5.28, 38.25, 15.38 ], [ 49.63, 5.33, 18.6, 20.61, 22.34, 11.06, 2.22, 16.54, 53.47, 8.68 ], [ 42.2, 19.69, 4.01, 26.61, 34.98, 13.31, 4.99, 26.61, 47, 22.4 ], [ 48.24, 17.15, 28.34, 10.62, 30.8, 15.28, 21.08, 5.84, 49.72, 13.17 ], [ 51.71, 3.67, 25.57, 13.12, 38.31, 8.22, 22.73, 13.4, 47.61, 1.33 ], [ 41.29, 6.55, 21.66, 3.17, 36.62, 8.21, 19.98, 23.25, 50.76, 6.85 ], [ 40.45, 27.37, 2.53, 13.66, 28.2, 14.32, 5.53, 14.67, 45.83, 15.08 ], [ 42.23, 15.24, 10.62, 15.37, 33.92, 1.51, 8.22, 23.53, 49.44, 23.54 ], [ 30.88, 19.84, 8.42, 12.73, 24.76, 15.13, 23.73, 25.79, 48.92, 25.5 ], [ 52.89, 23.31, 3.3, 21.71, 29.33, 10.32, 6.42, 23.83, 25.11, 19.65 ], [ 41.38, 7.44, 4.04, 17.54, 22.94, 1.33, 8.61, 15.45, 49.55, 15.54 ], [ 36.81, 21.34, 3.01, 7.09, 40.02, 20.06, 21.23, 20.02, 31.02, 17.84 ], [ 52.29, 5.38, 9.99, 27.28, 29.16, 1.64, 7.27, 7.86, 23.61, 19.17 ], [ 48.66, 10.33, 15.84, 12.52, 31.17, 15.37, 9.89, 16.43, 25.28, 27.01 ], [ 22.81, 20.15, 13.89, 6.64, 30.87, 28.08, 11.89, 7.24, 44.44, 4.9 ], [ 34.43, 7.32, 7.79, 12.2, 33.55, 16.55, 13.96, 9.8, 51.31, 3.19 ], [ 39.06, 17.33, 9.56, 20.28, 40.86, 17.91, 22.71, 13.64, 31.37, 6.01 ], [ 45.03, 5.67, 22.87, 12.66, 29.29, 4.14, 7.56, 16.01, 36.65, 12.56 ], [ 51.77, 4.1, 26, 3.96, 32.23, 5.26, 11.71, 17.96, 39.59, 20.08 ], [ 49.81, 12.7, 1.62, 11.22, 52.71, 22.16, 13.26, 15, 42.64, 9.27 ], [ 43.15, 20.12, 28.12, 8.84, 26.77, 20.31, 9.84, 15.72, 24.4, 27.82 ], [ 41.2, 20.39, 7.76, 8.74, 39.75, 20.92, 
15.3, 10.16, 50.94, 15.1 ], [ 43.31, 21.41, 24.5, 14.25, 42.58, 10.01, 9.79, 11.6, 24.88, 27.58 ], [ 38.03, 9.96, 18.37, 3.06, 28.42, 2.99, 1.76, 4.72, 39.11, 21.21 ], [ 38.78, 11.89, 15.05, 21.22, 38.79, 15.56, 7.43, 16.35, 24.81, 13.12 ], [ 29.68, 28.51, 27.6, 27.92, 44.52, 18.12, 25.71, 16.97, 36.77, 10.44 ], [ 52.9, 19.04, 28.71, 10.37, 27.04, 18.85, 28.06, 18.31, 31.31, 27.66 ], [ 50.77, 21.27, 28.56, 5.12, 35.39, 8.15, 11.73, 8.75, 43.82, 26.65 ], [ 28.09, 21.14, 17.46, 27.36, 51.84, 26.42, 13.57, 18.96, 33.6, 19.64 ], [ 29.81, 7.84, 27.68, 22.15, 34.79, 18.01, 26.49, 15.11, 44.04, 4 ], [ 43.29, 24.01, 13.9, 27.21, 42.29, 17.94, 16.38, 25.6, 49.04, 14.41 ], [ 31.46, 9.34, 8.84, 19.22, 52.91, 13.62, 6.53, 26.15, 24.57, 13.06 ], [ 42.03, 21.07, 27.35, 24.6, 45.33, 24.58, 5.59, 9.23, 51.35, 1.02 ], [ 41.69, 22.28, 12.03, 7.11, 32.98, 5.01, 15.36, 3.16, 22.33, 3.7 ], [ 47.62, 11.61, 28.85, 10.52, 36.34, 19.35, 22.81, 15.83, 42.17, 25.63 ], [ 30.29, 9.92, 2.32, 4.99, 34.83, 23.06, 17.59, 10.86, 45.17, 16.3 ], [ 49.02, 26.6, 15.79, 20.37, 52.66, 28.84, 19.57, 24.66, 42.02, 20.58 ], [ 37.26, 10.71, 22.96, 5.43, 29.59, 14.85, 16.53, 24, 52.94, 26.8 ], [ 33.25, 2.41, 17.29, 27.27, 32.39, 28.05, 24.16, 20.49, 28.8, 11.27 ], [ 39.38, 6.45, 20.91, 9.61, 45.47, 18.77, 9.13, 13.85, 31.49, 21.25 ], [ 51.05, 15.04, 20.68, 8.44, 48.1, 19.95, 19.61, 28.13, 35.33, 17.97 ], [ 29.34, 16.32, 6.32, 2.27, 53.69, 14.11, 18.18, 14.14, 35.59, 6.2 ], [ 36.38, 12.11, 8.04, 28.07, 24.38, 23.47, 22.72, 27.67, 49.74, 5.53 ], [ 29.6, 10.14, 24.5, 8.97, 48.86, 20.44, 1.83, 3.96, 21.76, 9.1 ], [ 52.36, 21.32, 23.69, 20.39, 46.22, 24.85, 21.1, 24.07, 30.68, 11.32 ], [ 22.61, 13.82, 28.27, 3.5, 32.82, 12.1, 28.91, 10.63, 52.58, 25.55 ], [ 25.18, 27.88, 26.97, 24.2, 53.01, 23.7, 22.25, 10.12, 29.71, 5.07 ], [ 23.97, 27.01, 9.14, 11.7, 23.19, 12.18, 20.88, 25.48, 38.24, 20.58 ], [ 49.63, 13.67, 1.34, 17.56, 50.43, 7.5, 4.14, 12.52, 48.7, 22.08 ], [ 51.08, 10.04, 18.23, 14.37, 44.22, 1.55, 7.89, 23.5, 24.09, 8.86 ], [ 32.88, 4.6, 4.6, 3.62, 48.38, 2.13, 28.81, 7.23, 25.57, 8.73 ], [ 32.27, 17.45, 28.26, 1.66, 39.41, 28.36, 2.61, 23.5, 26.42, 26.57 ], [ 45.43, 1.89, 19.53, 14.48, 31.9, 20.54, 1.01, 23.49, 53.54, 11.51 ], [ 53.68, 22.09, 15.49, 6.19, 40.87, 25.97, 25.33, 1.17, 31.83, 23.54 ], [ 36.4, 3.93, 11.53, 12.81, 28.34, 2.62, 4.94, 21.85, 31.44, 15.91 ], [ 38.8, 19.03, 24.23, 9.15, 30.01, 18.03, 19.11, 5.56, 45.77, 28.97 ], [ 50.44, 24.58, 14.49, 11.83, 53.21, 13.68, 9.97, 19.76, 35.37, 5.13 ], [ 31.63, 16.26, 23.55, 20.4, 27.16, 1.02, 21.76, 12.51, 23.73, 10.42 ], [ 38.24, 7.25, 5.81, 19.28, 30.62, 16.17, 19.11, 3.93, 49.73, 19.5 ], [ 46.77, 12.63, 25.33, 27.77, 23.56, 28.56, 18.59, 10.73, 25.1, 2.19 ], [ 49.79, 2.32, 1.86, 26.89, 41.33, 17.48, 14.26, 15.09, 51.34, 19.84 ], [ 26.78, 2.77, 19.56, 18.37, 45.22, 16.96, 3.91, 25.44, 35.53, 8.37 ], [ 51.84, 11.17, 21.27, 1.05, 32.11, 2.47, 16.07, 2.64, 41.37, 4.85 ], [ 41.1, 27.01, 22.43, 1.42, 22.96, 18.39, 21.03, 3.31, 47.51, 12.6 ], [ 46.15, 23.08, 15.63, 10.09, 29.78, 16.61, 11.71, 5.25, 28.76, 15.49 ], [ 48.44, 19.05, 12.58, 4.44, 30.65, 17.2, 22.99, 13.83, 24.92, 25.95 ], [ 43.75, 27.45, 7.52, 28.08, 48.03, 7.82, 28.79, 13.96, 43.92, 1.08 ], [ 23.27, 9.73, 6.63, 7.57, 46.95, 5.47, 8.81, 27.18, 43.93, 20.68 ], [ 46.11, 9.8, 19.72, 1.68, 39.37, 8.94, 7.18, 22.96, 43.29, 16.61 ], [ 37.81, 28.13, 27.44, 14.8, 38.41, 6.19, 12.98, 15.88, 34.2, 21.84 ], [ 25.45, 7.63, 13.02, 13.04, 45.67, 25.06, 18.63, 5.5, 24.81, 10.08 ], [ 31.85, 12.55, 10.13, 13.15, 23.25, 16.16, 20.33, 
27.88, 36.94, 3.71 ], [ 38.29, 16.24, 22.73, 14.31, 43.97, 10.44, 26.83, 20.28, 38.77, 2.73 ], [ 27.41, 10.64, 18.83, 16.97, 31.26, 13.18, 2.64, 5.84, 35.93, 24.41 ], [ 53.9, 1.2, 28.76, 5.34, 32.91, 18.14, 1.6, 27.94, 41.53, 16.48 ], [ 42.34, 8.83, 28.06, 1.11, 21.38, 14.28, 28.54, 14.8, 45.92, 5.65 ], [ 22.59, 27.42, 2.06, 3.08, 42.51, 18.3, 21.8, 10.97, 28.17, 9.76 ], [ 24.03, 5.37, 3.06, 24.75, 26.88, 17.01, 7.32, 6.12, 53.62, 19.39 ], [ 25.21, 12.38, 4.06, 8.5, 23.66, 26.27, 6.5, 13.97, 52.23, 2.53 ], [ 21.43, 9.02, 11.43, 24.84, 45.26, 14.65, 1.01, 2.52, 21.9, 16.26 ], [ 45.57, 11.08, 20.8, 26.15, 29.2, 26.35, 9.27, 15.34, 34.89, 28.51 ], [ 46.2, 11.07, 12.05, 16.71, 45.23, 6.56, 27.86, 17.12, 51.75, 6.62 ], [ 21.35, 20.14, 19.06, 22.41, 36.04, 14.14, 1.79, 19.66, 25.71, 23.23 ], [ 49.33, 24.28, 22.3, 27.59, 53.76, 26.28, 18.32, 7.68, 47.18, 1.02 ], [ 21.78, 17.01, 23.26, 23.39, 25.3, 4.18, 2.08, 10.78, 33.78, 1.32 ], [ 30.89, 8.35, 1.4, 25.68, 28.98, 22.62, 18.18, 8.84, 37.04, 2.99 ], [ 21.55, 11.85, 24.33, 8.5, 38.4, 9.96, 10.91, 19.72, 41.95, 12.44 ], [ 52.66, 1.12, 23.65, 27.21, 37.26, 27.38, 7.5, 17.98, 52.83, 13.38 ], [ 34.72, 18.76, 28.1, 18.17, 23.51, 15.65, 7.27, 23.02, 30.09, 18.72 ], [ 30.5, 4.8, 3.22, 16.88, 31, 28.99, 26.39, 24.91, 45.3, 11.19 ], [ 43.95, 11.96, 13.07, 25, 31.82, 21.9, 17.47, 15.41, 39.76, 10.66 ], [ 48.1, 1.12, 2.47, 21.34, 43.53, 25.06, 16.5, 6.3, 28.09, 25.49 ], [ 29.38, 11.34, 22.09, 6.79, 21.35, 4.75, 1.38, 8.5, 33.73, 15.17 ], [ 21.45, 3.14, 13.84, 21.62, 34.39, 1.32, 23.04, 9.6, 33.36, 12.83 ], [ 27.55, 13.14, 1.92, 23.19, 34.43, 21.65, 6.65, 8.66, 23.27, 13.41 ], [ 42, 8.04, 10.01, 22.62, 31.02, 26.42, 10.56, 6.07, 52.1, 22.73 ], [ 50.68, 11.8, 20.5, 13.2, 49.5, 16.36, 23.47, 1.26, 52.11, 6.87 ], [ 46.9, 6.11, 9.83, 5.76, 33.14, 20.6, 22.2, 10.86, 24.28, 7.22 ], [ 23.49, 20.91, 13.9, 3.77, 41.37, 16.67, 20.85, 14.56, 41.59, 2.48 ], [ 39.56, 7.5, 22.68, 14.88, 26.3, 26.24, 22.45, 27.49, 33.75, 12.28 ], [ 41, 21.73, 26.46, 24.01, 37.7, 19.38, 23.42, 12.84, 32.01, 5.28 ], [ 28.02, 16.13, 9.13, 22.94, 29.45, 20.2, 15.4, 13.69, 45.03, 21.07 ], [ 26.45, 14.49, 20.43, 7.38, 24.85, 25.36, 2.69, 4.91, 46.74, 18.85 ], [ 35.11, 7.17, 22.37, 7.18, 35.22, 17.4, 13.75, 25.76, 37.96, 14.03 ], [ 31.8, 28.62, 4.03, 27.35, 34.46, 4.35, 14.3, 8.43, 31.49, 27.2 ], [ 52.7, 5.97, 5.7, 10.52, 44.97, 8.57, 15.36, 6.99, 47.65, 17.3 ], [ 50.45, 22.98, 24.95, 20.9, 22.78, 22.91, 17.43, 21.92, 25.3, 24.46 ], [ 21.86, 20.42, 18.33, 5.72, 29.44, 28.62, 7.23, 5.17, 37.31, 3.12 ], [ 48.8, 13.89, 15.33, 11.12, 48.7, 27.98, 19.88, 14.3, 42.36, 19.42 ], [ 28.11, 9.97, 25.52, 3.68, 48.55, 2.26, 20.76, 9.64, 42.63, 9.84 ], [ 30.79, 2.41, 13.33, 23.13, 52.03, 4.8, 13.08, 26.53, 41.04, 18.68 ], [ 45.36, 25.66, 7.86, 3.99, 45.06, 21.64, 4.39, 23.03, 23, 14.41 ] ] } }, config={ "arcSegmentsSeparation": 3, "circularAnchors2Align": "inside", "circularAnchorsAlign": "outside", "circularCenterProportion": 0.5, "circularLabelsAlign": "inside", "colorScheme": "Tableau", "colors": [ "#332288", "#6699CC", "#88CCEE", "#44AA99", "#117733", "#999933", "#DDCC77", "#661100", "#CC6677", "#AA4466", "#882255", "#AA4499" ], "connections": [ [ "rgb(0,0,255)", 1, 17615830, 13, 60500000, 100000000, 20000000 ], [ "rgb(0,255,0)", 1, 2300000, 8, 13650000, 40000000, 80000000 ], [ "rgb(120,0,255)", 3, 71800000, 17, 6800000, 50000000, 25000000 ], [ "rgb(0,40,255)", 7, 71800000, 12, 5520000, 200000000, 80000000 ], [ "rgb(80,0,55)", 4, 8430000, 22, 6600000, 100000000, 50000000 ], [ "rgb(0,55,140)", 4, 3100000, 14, 
64100000, 58000000, 10000000 ], [ "rgb(255,0,0)", 2, 94840000, 20, 6243500, 70000000, 30000000 ] ], "graphType": "Circular", "ringGraphType": [ "heatmap", "stacked" ], "ringsOrder": [ "chromosomes", "Annt1", "Lev:1", "anchors", "labels", "ideogram", "anchors2", "Lev:4" ], "segregateSamplesBy": [ "Factor4" ], "showIdeogram": True, "title": "Custom Plotting Order" }, width=713, height=613, events=CXEvents(), after_render=[], other_init_params={ "version": 35, "events": False, "info": False, "afterRenderInit": False, "noValidate": True } ) display = CXNoteBook(cx) display.render(output_file="circular_6.html") ```
github_jupyter
# Exploring different Coastline options in Magics This notebook will help you discover lots of possibilities for designing the background of your maps in Magics. From your workstation: load the magics module, swap (or load) Magics/new, start a Jupyter notebook and load this notebook. **mcoast** controls the background of our maps. Here you can set things like colours of land and sea, coastline resolution and colour, and also grid, rivers, borders, cities etc. A list of all **mcoast** parameters can be found [here](https://confluence.ecmwf.int/display/MAGP/Coastlines "Coastlines parameters") ### Import Magics and define non-coastline parameters To start, we will define some non-coastline parameters, which we will not change later. ``` import Magics.macro as magics projection = magics.mmap( subpage_map_library_area = "on", subpage_map_area_name = 'central_europe' ) ``` As with all Magics functions, the default is something you can start with. But if you don't like it, *everything* can be changed. ``` coast = magics.mcoast() magics.plot(projection, coast) ``` ### High resolution coastline and dashed gridlines In Magics we can fully control how land and sea look. Coastline resolution can be 'low', 'medium' or 'high', or if you want to let Magics decide, you can set it as 'automatic'. Land and sea can be shaded or not, and you can set the colour. You can choose to have gridlines or not, their frequency, style, colour, thickness, labels... ``` coast = magics.mcoast( map_coastline_style = "solid", map_coastline_colour = "tan", map_coastline_resolution = "high", map_coastline_land_shade = "on", map_coastline_land_shade_colour = "cream", map_grid = "on", map_grid_colour = "tan", map_grid_latitude_increment = 5.00, map_grid_longitude_increment = 10.00, map_grid_line_style = "dash", map_label = "on", map_label_colour = "charcoal", map_label_height = 0.35, map_label_latitude_frequency = 2, map_label_longitude_frequency = 2, map_label_blanking = "off" ) magics.plot(projection, coast) ``` ### Administrative boundaries, cities and rivers ``` coast = magics.mcoast( map_boundaries = "on", map_boundaries_colour = "red", map_coastline_resolution = "high", map_coastline_land_shade_colour = "cream", map_cities = "on", map_grid = "off", map_coastline_land_shade = "on", map_coastline_colour = "tan", map_administrative_boundaries = "on", map_administrative_boundaries_countries_list = ["FRA", "ESP", "GBR"], map_administrative_boundaries_colour = "orange", map_rivers = "on" ) magics.plot(projection, coast) ``` ### Grid lines, boundaries and rivers ``` coast = magics.mcoast( map_boundaries = "on", map_boundaries_colour = "red", map_coastline_resolution = "high", map_coastline_colour = "tan", map_coastline_land_shade = "on", map_coastline_land_shade_colour = "cream", map_grid = "on", map_grid_line_style = "dot", map_grid_colour = "tan", map_grid_latitude_increment = 2.00, map_grid_longitude_increment = 2.00, map_grid_latitude_reference = 0.00, map_rivers = "on" ) magics.plot(projection, coast) ``` ### Sea, lakes and rivers ``` coast = magics.mcoast( map_coastline_sea_shade_colour = "sky", map_coastline_resolution = "high", map_rivers_colour = "sky", map_grid = "off", map_coastline_land_shade = "off", map_coastline_colour = "sky", map_coastline_sea_shade = "on", map_rivers = "on") magics.plot(projection, coast) ```
github_jupyter
# Streaming Sample: Cosmos DB ChangeFeed - Databricks In this notebook, you read a live stream of tweets stored in Cosmos DB by leveraging Apache Spark to read the Cosmos DB Change Feed, and run transformations on the data in a Databricks cluster. ## Prerequisites: - Databricks Cluster (Spark) - Cosmos DB Spark Connector (azure-cosmosdb-spark) - Create a library using Maven coordinates. Simply type `azure-cosmosdb-spark_2.2.0` in the search box and search for it, or create the library by uploading the jar file, which can be downloaded from the Maven central repository - Azure Cosmos DB Collection ## Test Feed Generator - https://github.com/tknandu/TwitterCosmosDBFeed ## LINKS - [Working with the change feed support in Azure Cosmos DB](https://docs.microsoft.com/en-us/azure/cosmos-db/change-feed) - [Twitter with Spark and Azure Cosmos DB Change Feed Sample](https://github.com/Azure/azure-cosmosdb-spark/blob/master/samples/notebooks/Twitter%20with%20Spark%20and%20Azure%20Cosmos%20DB%20Change%20Feed.ipynb) - [Stream Processing Changes using Azure Cosmos DB Change Feed and Apache Spark](https://github.com/Azure/azure-cosmosdb-spark/wiki/Stream-Processing-Changes-using-Azure-Cosmos-DB-Change-Feed-and-Apache-Spark) - https://github.com/tknandu/TwitterCosmosDBFeed ## Configure Connection to Cosmos DB Change Feed using azure-cosmosdb-spark The parameters below connect to the Cosmos DB Change Feed; for more information, please refer to Change Feed Test Runs. ``` # Change feed variables rollingChangeFeed = False startFromTheBeginning = False useNextToken = True database = "changefeedsource" collection = "tweet_new" tweetsConfig = { "Endpoint" : "https://dbstreamdemo.documents.azure.com:443/", "Masterkey" : "ekRLXkETPJ93s6XZz4YubZOw1mjSnoO5Bhz1Gk29bVxCbtgtKmiyRz4SogOSxLOGTouXbwlaAHcHOzct4JVwtQ==", #"Database" : database, #"Collection" : collection, "Database" : "changefeedsource", "Collection" : "tweet_new", "ReadChangeFeed" : "true", "ChangeFeedQueryName" : database + collection + " ", "ChangeFeedStartFromTheBeginning" : str(startFromTheBeginning), "ChangeFeedUseNextToken" : str(useNextToken), "RollingChangeFeed" : str(rollingChangeFeed), #"ChangeFeedCheckpointLocation" : "./changefeedcheckpointlocation", "SamplingRatio" : "1.0" } ``` ## Read a DataFrame ``` # Read a DataFrame # SparkSession available as 'spark'. tweets = spark.read.format("com.microsoft.azure.cosmosdb.spark").options(**tweetsConfig).load() ``` ## Get the number of tweets This provides the count of tweets; it will start off at 0 and then continue growing as you re-run the cell below. ``` # Get the number of tweets tweets.count() # display(tweets) # tweets.printSchema() ``` ## Create tweets TempView This way we can run SQL statements within the notebook ``` # Create tweets TempView # This way we can run SQL statements within the notebook tweets.createOrReplaceTempView("tweets") %sql select count(1) from tweets ``` ## Show various attributes of the first 20 tweets ``` %sql select id, created_at, user.screen_name, user.location, text, retweet_count, entities.hashtags, entities.user_mentions, favorited, source from tweets limit 20 ``` ## Determine Top 10 hashtags for the tweets ``` %sql select concat(concat((dense_rank() OVER (PARTITION BY 1 ORDER BY tweets DESC)-1), '. 
'), text) as hashtags, tweets from ( select hashtags.text, count(distinct id) as tweets from ( select explode(entities.hashtags) as hashtags, id from tweets ) a group by hashtags.text order by tweets desc limit 10 ) b ``` # [APPENDIX] Connecting to Cosmos DB using pydocumentdb ``` # Import necessary libraries import pydocumentdb from pydocumentdb import document_client from pydocumentdb import documents import datetime # Configuring the connection policy (allowing for endpoint discovery) connectionPolicy = documents.ConnectionPolicy() connectionPolicy.EnableEndpointDiscovery connectionPolicy.PreferredLocations = ["Japan East", "Japan West"] # Set keys to connect to Cosmos DB masterKey = 'b3KPBHQvWTD8prYsQDiHlaM8kDzBholipD1sgshjT60ayDK9WkvRAT0Qywsi5FkcyKsYcvF4iIrUEBBzaZwJKw==' host = 'https://videoanalytics.documents.azure.com:443/' client = document_client.DocumentClient(host, {'masterKey': masterKey}, connectionPolicy) # Configure database and collection databaseId = 'asset' collectionId = 'meta' # Links the Cosmos DB client will use to connect to the database and collection dbLink = 'dbs/' + databaseId collLink = dbLink + '/colls/' + collectionId # Set query parameter #querystr = "SELECT c.City FROM c WHERE c.State='WA'" querystr = "SELECT * FROM c" # Query documents query = client.QueryDocuments(collLink, querystr, options=None, partition_key=None) # Query for partitioned collections # query = client.QueryDocuments(collLink, querystr, options= { 'enableCrossPartitionQuery': True }, partition_key=None) # Push results into list `elements` elements = list(query) print(elements) ```
github_jupyter
# Deep Neural Network You see a lot of people around you who are interested in deep neural networks and you think that it might be interesting to start thinking about creating software that is as flexible as possible and allows novice users to test this kind of method. You have no previous knowledge and, while searching a bit on the internet, you come across this project https://github.com/HyTruongSon/Neural-Network-MNIST-CPP. You say to yourself that this is a good starting point and decide to spend a bit more time on it. We recall here the key elements found in deep neural networks. We will not go into the mathematical details as this is not the purpose of this course. A deep neural network is composed of an input, an output and several hidden layers. A neuron is illustrated by the following figure ![image](./figures/dnn1.png) This figure comes from a CNRS course called Fidle (https://gricad-gitlab.univ-grenoble-alpes.fr/talks/fidle). We can observe that a neuron is made of weights, a bias and an activation function. The activation function can be a sigmoid, reLU, tanh, ... A deep neural network is composed of several hidden layers with several neurons as illustrated in the following figure ![image](./figures/dnn2.png) This figure also comes from the CNRS course Fidle. In the following, we will use these notations: - $w^l_{j,i}$ is the weight of the layer $l$ for the neuron $j$ and the input entry $i$. - $z^l_j$ is the aggregation: $\sum_i x_{i}^l w_{j, i}^l + b_j^l$ where $x_{i}$ is the input. - $\sigma$ is the activation function. - $a^l_j$ is the output of the neuron $j$ for the layer $l$. - $L$ is the index of the last layer. - $C(a^L, y)$ is the cost function where $a^L$ is the predicted value and $y$ is the expected result. The algorithm has three steps: - The forward propagation: for a given input, traverse all the layers up to the output and fill $z^l$ and $a^l$. - Change the weights and biases to minimize the cost function using gradient descent. This is called backward propagation. - Iterate until reaching the maximum number of iterations or a given tolerance. The gradient descent can be written as $$ w_{j, i}^l = w_{j, i}^l - \mu \frac{\partial C}{\partial w_{j, i}^l}, $$ where $\mu$ is the learning rate. The equations of the backward propagation are - $\delta^L_j = \frac{\partial C}{\partial a_j^L}\sigma'(z_j^L)$ - $\delta^l_j = \sum_i w^{l+1}_{i, j}\delta^{l+1}_i \sigma'(z_j^l)$ - $\frac{\partial C}{\partial b^l_j} = \delta_j^l$ - $\frac{\partial C}{\partial w^l_{j, i}} = a^{l-1}_i \delta_j^l$ In our case, $$ C'(\hat{y}, y) = \hat{y} - y. $$ We need two sets of data: data for training the neural network and data for testing the final weights and biases. - Read the code https://github.com/HyTruongSon/Neural-Network-MNIST-CPP carefully and try to recognize each element of the algorithm. - Think of a code organization and data structure that offer more flexibility and readability. - Duplicate `step_0` into `step_1` and add all the `CMakeLists.txt` files needed to create a library from the `dnn` source files and the executable for the main function - Duplicate `step_1` into `step_2` and implement the following functions - `forward_propagation` - `backward_propagation` - `evaluate` - How can you proceed to have more flexibility in the choice of the activation function? **Note**: for the moment, you have only seen the C++ functions. We can see that it is difficult to have a flexible implementation with only functions. 
The use of C++ classes will improve the implementation considerably and will make it easier to add several activation and cost functions.
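To make the equations above concrete before designing the C++ version, here is a minimal NumPy sketch (an illustration only, not the reference implementation from the linked repository) of forward and backward propagation for one hidden layer with a sigmoid activation and the cost derivative $\hat{y} - y$ used above; the layer sizes and learning rate are arbitrary.

```
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def sigmoid_prime(z):
    s = sigmoid(z)
    return s * (1.0 - s)

rng = np.random.default_rng(0)
n_in, n_hidden, n_out, mu = 4, 5, 3, 0.1

# Weights W[j, i] and biases b[j] for layer 1 (hidden) and layer 2 (output).
W1, b1 = rng.normal(size=(n_hidden, n_in)), np.zeros(n_hidden)
W2, b2 = rng.normal(size=(n_out, n_hidden)), np.zeros(n_out)

x = rng.normal(size=n_in)          # one training input
y = np.array([1.0, 0.0, 0.0])      # expected output

# Forward propagation: z^l = W^l a^{l-1} + b^l, a^l = sigma(z^l).
z1 = W1 @ x + b1; a1 = sigmoid(z1)
z2 = W2 @ a1 + b2; a2 = sigmoid(z2)

# Backward propagation: delta^L = C'(a^L, y) * sigma'(z^L),
# delta^l = (W^{l+1}.T @ delta^{l+1}) * sigma'(z^l).
delta2 = (a2 - y) * sigmoid_prime(z2)
delta1 = (W2.T @ delta2) * sigmoid_prime(z1)

# Gradient descent step: dC/dW^l[j, i] = a^{l-1}[i] * delta^l[j], dC/db^l[j] = delta^l[j].
W2 -= mu * np.outer(delta2, a1); b2 -= mu * delta2
W1 -= mu * np.outer(delta1, x);  b1 -= mu * delta1
```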
github_jupyter
``` from __future__ import print_function, division import json import numpy as np import pandas as pd import librosa import soundfile as sf import torch from torch.utils.data import Dataset from keras.preprocessing.sequence import pad_sequences # Ignore warnings import warnings warnings.filterwarnings("ignore") class SpeechDataset(Dataset): """Speech dataset.""" def __init__(self, csv_file, labels_file, audio_conf, transform=None, normalize=True): """ Args: csv_file (string): Path to the csv file contain audio and transcript path. labels_file (string): Path to the json file contain label dictionary. audio_conf (dict) : Audio config info. transform (callable, optional): Optional transform to be applied on a sample. """ self.speech_frame = pd.read_csv(csv_file, header=None) with open(labels_file, 'r') as f: self.labels = json.loads(f.read()) self.window = audio_conf['window'] self.window_size = audio_conf['window_size'] self.window_stride = audio_conf['window_stride'] self.sampling_rate = audio_conf['sampling_rate'] self.transform = transform self.normalize = normalize def __len__(self): return len(self.speech_frame) def __getitem__(self, idx): wav_file = self.speech_frame.iloc[idx, 0] transcript_file = self.speech_frame.iloc[idx, 1] try: signal, _ = sf.read(wav_file) signal /= 1 << 31 signal = self.spectrogram(signal) with open(transcript_file, 'r') as f: transcript = f.read().strip() transcript_idx = [] transcript_idx.append(self.labels['<sos>']) for char in list(transcript): if char in self.labels: transcript_idx.append(self.labels[char]) transcript_idx.append(self.labels['<eos>']) sample = {'signal': signal, 'transcript': np.array(transcript_idx)} if self.transform: sample = self.transform(sample) return sample except: return wav_file def spectrogram(self, signal): n_fft = int(self.sampling_rate * self.window_size) win_length = n_fft hop_length = int(self.sampling_rate * self.window_stride) # STFT D = librosa.stft(signal, n_fft=n_fft, hop_length=hop_length, window=self.window, win_length=win_length) spect, phase = librosa.magphase(D) # S = log(S+1) spect = np.log1p(spect) spect = torch.FloatTensor(spect) if self.normalize: mean = spect.mean() std = spect.std() spect.add_(-mean) spect.div_(std) return spect class Padding(object): """Rescale the audio signal and transcript to a given size. Args: signal_size (int): Desired output size of signal. transcript_size (int): Desired output size of transcript. labels_file (string): Path to the json file contain label dictionary. """ def __init__(self, signal_size, transcript_size, labels_file): assert isinstance(signal_size, (int)) assert isinstance(transcript_size, (int)) self.signal_size = signal_size self.transcript_size = transcript_size with open(labels_file, 'r') as f: self.labels = json.loads(f.read()) def __call__(self, sample): signal, transcript = sample['signal'], sample['transcript'] signal /= 1 << 31 signal = pad_sequences(signal, maxlen=self.signal_size, padding='post', truncating='post', value=0.0, dtype='float') transcript = pad_sequences(transcript.reshape(1, -1), maxlen=self.transcript_size, padding='post', truncating='post', value=self.labels['pad'], dtype='int') return {'signal': signal, 'transcript': transcript} class ToTensor(object): """Convert ndarrays in sample to Tensors.""" def __call__(self, sample): signal, transcript = sample['signal'], sample['transcript'] return {'signal': torch.from_numpy(signal), 'transcript': torch.from_numpy(transcript)} ```
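A possible way to wire these classes together is sketched below; the manifest path, label file, padding lengths and audio settings are placeholders rather than values from the original project.

```
from torch.utils.data import DataLoader

# Hypothetical audio configuration and file paths -- adjust to your data.
audio_conf = {'window': 'hamming', 'window_size': 0.02,
              'window_stride': 0.01, 'sampling_rate': 16000}

def compose(*transforms):
    """Apply a chain of sample transforms in order (simple stand-in for Compose)."""
    def _apply(sample):
        for t in transforms:
            sample = t(sample)
        return sample
    return _apply

dataset = SpeechDataset(csv_file='train_manifest.csv',
                        labels_file='labels.json',
                        audio_conf=audio_conf,
                        transform=compose(Padding(800, 200, 'labels.json'),
                                          ToTensor()))
loader = DataLoader(dataset, batch_size=8, shuffle=True)
batch = next(iter(loader))
print(batch['signal'].shape, batch['transcript'].shape)
```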
github_jupyter
# CatBoostRegressor with RobustScaler This code template is for regression analysis using CatBoostRegressor and the Robust Scaler feature scaling technique. CatBoost is an algorithm for gradient boosting on decision trees. <img src="https://cdn.blobcity.com/assets/gpu_recommended.png" height="25" style="margin-bottom:-15px" /> ### Required Packages ``` !pip install catboost import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as se from sklearn.preprocessing import LabelEncoder, RobustScaler from sklearn.model_selection import train_test_split from catboost import CatBoostRegressor from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error warnings.filterwarnings('ignore') ``` ### Initialization Filepath of the CSV file ``` #filepath file_path= '' ``` List of features which are required for model training. ``` #x_values features=[] ``` Target feature for prediction. ``` #y_value target='' ``` ### Data Fetching Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools. We will use the pandas library to read the CSV file using its storage path, and we use the head function to display the initial rows. ``` df=pd.read_csv(file_path) df.head() ``` ### Feature Selection It is the process of reducing the number of input variables when developing a predictive model. It is used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model. We will assign all the required input features to X and the target/outcome to Y. ``` X = df[features] Y = df[target] ``` ### Data Preprocessing Since the majority of machine learning models don't handle string category data and null values, we have to explicitly remove or replace them. The snippet below has functions which remove null values if any exist and convert string class data in the dataset by encoding it into integer classes. ``` def NullClearner(df): if(isinstance(df, pd.Series) and (df.dtype in ["float64","int64"])): df.fillna(df.mean(),inplace=True) return df elif(isinstance(df, pd.Series)): df.fillna(df.mode()[0],inplace=True) return df else:return df def EncodeX(df): return pd.get_dummies(df) x=X.columns.to_list() for i in x: X[i]=NullClearner(X[i]) X=EncodeX(X) Y=NullClearner(Y) X.head() ``` #### Correlation Map In order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns. ``` f,ax = plt.subplots(figsize=(18, 18)) matrix = np.triu(X.corr()) se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix) plt.show() ``` ### Data Splitting The train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is used to fit/train the model; the second subset is used for prediction. The main motive is to estimate the performance of the model on new data. ``` x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=123) ``` ### Data Rescaling It scales features using statistics that are robust to outliers. This method removes the median and scales the data to the range between the 1st and 3rd quartiles, i.e., between the 25th and 75th percentiles. This range is also called the interquartile range. 
<a href="https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.RobustScaler.html">More about Robust Scaler</a> ``` robust = RobustScaler() x_train = robust.fit_transform(x_train) x_test = robust.transform(x_test) ``` ### Model CatBoost is an algorithm for gradient boosting on decision trees. Developed by Yandex researchers and engineers, it is the successor of the MatrixNet algorithm that is widely used within the company for ranking tasks, forecasting and making recommendations #### Tuning parameters 1. **learning_rate**:, float, default = it is defined automatically for Logloss, MultiClass & RMSE loss functions depending on the number of iterations if none of these parameters is set >The learning rate. Used for reducing the gradient step. 2. **l2_leaf_reg**: float, default = 3.0 >Coefficient at the L2 regularization term of the cost function. Any positive value is allowed. 3. **bootstrap_type**: string, default = depends on the selected mode and processing unit >Bootstrap type. Defines the method for sampling the weights of objects. * Supported methods: * Bayesian * Bernoulli * MVS * Poisson (supported for GPU only) * No 4. **subsample**: float, default = depends on the dataset size and the bootstrap type >Sample rate for bagging. This parameter can be used if one of the following bootstrap types is selected: * Poisson * Bernoulli * MVS For more information refer: [API](https://catboost.ai/docs/concepts/python-reference_catboostregressor.html) ``` # Build Model here model = CatBoostRegressor(verbose=False) model.fit(x_train, y_train) ``` #### Model Accuracy score() method return the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. ``` print("Accuracy score {:.2f} %\n".format(model.score(x_test,y_test)*100)) ``` > **r2_score**: The **r2_score** function computes the percentage variablility explained by our model, either the fraction or the count of correct predictions. > **mae**: The **mean abosolute error** function calculates the amount of total error(absolute average distance between the real data and the predicted data) by our model. > **mse**: The **mean squared error** function squares the error(penalizes the model for large errors) by our model. ``` y_pred=model.predict(x_test) print("R2 Score: {:.2f} %".format(r2_score(y_test,y_pred)*100)) print("Mean Absolute Error {:.2f}".format(mean_absolute_error(y_test,y_pred))) print("Mean Squared Error {:.2f}".format(mean_squared_error(y_test,y_pred))) ``` #### Prediction Plot First, we make use of a plot to plot the actual observations, with x_train on the x-axis and y_train on the y-axis. For the regression line, we will use x_train on the x-axis and then the predictions of the x_train observations on the y-axis. ``` plt.figure(figsize=(14,10)) plt.plot(range(20),y_test[0:20], color = "green") plt.plot(range(20),model.predict(x_test[0:20]), color = "red") plt.legend(["Actual","prediction"]) plt.title("Predicted vs True Value") plt.xlabel("Record number") plt.ylabel(target) plt.show() ``` ## Creator: Abhishek Garg, Github: [Profile](https://github.com/abhishek-252)
github_jupyter
##### Copyright 2018 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Distributed Training in TensorFlow <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r1/guide/distribute_strategy.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/r1/guide/distribute_strategy.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> </table> > Note: This is an archived TF1 notebook. These are configured to run in TF2's [compatbility mode](https://www.tensorflow.org/guide/migrate) but will run in TF1 as well. To use TF1 in Colab, use the [%tensorflow_version 1.x](https://colab.research.google.com/notebooks/tensorflow_version.ipynb) magic. ## Overview `tf.distribute.Strategy` is a TensorFlow API to distribute training across multiple GPUs, multiple machines or TPUs. Using this API, users can distribute their existing models and training code with minimal code changes. `tf.distribute.Strategy` has been designed with these key goals in mind: * Easy to use and support multiple user segments, including researchers, ML engineers, etc. * Provide good performance out of the box. * Easy switching between strategies. `tf.distribute.Strategy` can be used with TensorFlow's high level APIs, [tf.keras](https://www.tensorflow.org/r1/guide/keras) and [tf.estimator](https://www.tensorflow.org/r1/guide/estimators), with just a couple of lines of code change. It also provides an API that can be used to distribute custom training loops (and in general any computation using TensorFlow). In TensorFlow 2.0, users can execute their programs eagerly, or in a graph using [`tf.function`](../tutorials/eager/tf_function.ipynb). `tf.distribute.Strategy` intends to support both these modes of execution. Note that we may talk about training most of the time in this guide, but this API can also be used for distributing evaluation and prediction on different platforms. As you will see in a bit, very few changes are needed to use `tf.distribute.Strategy` with your code. This is because we have changed the underlying components of TensorFlow to become strategy-aware. This includes variables, layers, models, optimizers, metrics, summaries, and checkpoints. In this guide, we will talk about various types of strategies and how one can use them in different situations. Note: For a deeper understanding of the concepts, please watch [this deep-dive presentation](https://youtu.be/jKV53r9-H14). This is especially recommended if you plan to write your own training loop. ``` import tensorflow.compat.v1 as tf tf.disable_v2_behavior() ``` ## Types of strategies `tf.distribute.Strategy` intends to cover a number of use cases along different axes. Some of these combinations are currently supported and others will be added in the future. 
Some of these axes are:

* Synchronous vs asynchronous training: These are two common ways of distributing training with data parallelism. In sync training, all workers train over different slices of the input data in sync, aggregating gradients at each step. In async training, all workers train independently over the input data and update variables asynchronously. Typically sync training is supported via all-reduce and async training via a parameter-server architecture.
* Hardware platform: Users may want to scale their training onto multiple GPUs on one machine, or multiple machines in a network (with 0 or more GPUs each), or on Cloud TPUs.

In order to support these use cases, we have 4 strategies available. In the next section we will talk about which of these are supported in which scenarios in TF.

### MirroredStrategy

`tf.distribute.MirroredStrategy` supports synchronous distributed training on multiple GPUs on one machine. It creates one model replica per GPU device. Each variable in the model is mirrored across all the replicas. Together, these variables form a single conceptual variable called `MirroredVariable`. These variables are kept in sync with each other by applying identical updates.

Efficient all-reduce algorithms are used to communicate the variable updates across the devices. All-reduce aggregates tensors across all the devices by adding them up, and makes them available on each device. It’s a fused algorithm that is very efficient and can reduce the overhead of synchronization significantly. There are many all-reduce algorithms and implementations available, depending on the type of communication available between devices. By default, it uses NVIDIA NCCL as the all-reduce implementation. The user can also choose between a few other options we provide, or write their own.

Here is the simplest way of creating `MirroredStrategy`:
```
mirrored_strategy = tf.distribute.MirroredStrategy()
```
This will create a `MirroredStrategy` instance which will use all the GPUs that are visible to TensorFlow, and use NCCL as the cross-device communication. If you wish to use only some of the GPUs on your machine, you can do so like this:
```
mirrored_strategy = tf.distribute.MirroredStrategy(devices=["/gpu:0", "/gpu:1"])
```
If you wish to override the cross-device communication, you can do so using the `cross_device_ops` argument by supplying an instance of `tf.distribute.CrossDeviceOps`. Currently we provide `tf.distribute.HierarchicalCopyAllReduce` and `tf.distribute.ReductionToOneDevice` as two options other than `tf.distribute.NcclAllReduce`, which is the default.
```
mirrored_strategy = tf.distribute.MirroredStrategy(
    cross_device_ops=tf.distribute.HierarchicalCopyAllReduce())
```
### CentralStorageStrategy

`tf.distribute.experimental.CentralStorageStrategy` does synchronous training as well. Variables are not mirrored; instead they are placed on the CPU and operations are replicated across all local GPUs. If there is only one GPU, all variables and operations will be placed on that GPU.

Create a `CentralStorageStrategy` by:
```
central_storage_strategy = tf.distribute.experimental.CentralStorageStrategy()
```
This will create a `CentralStorageStrategy` instance which will use all visible GPUs and the CPU. Updates to variables on the replicas will be aggregated before being applied to the variables.

Note: This strategy is [`experimental`](https://www.tensorflow.org/r1/guide/version_compat#what_is_not_covered) as we are currently improving it and making it work for more scenarios.
As part of this, please expect the APIs to change in the future. ### MultiWorkerMirroredStrategy `tf.distribute.experimental.MultiWorkerMirroredStrategy` is very similar to `MirroredStrategy`. It implements synchronous distributed training across multiple workers, each with potentially multiple GPUs. Similar to `MirroredStrategy`, it creates copies of all variables in the model on each device across all workers. It uses [CollectiveOps](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/collective_ops.py) as the multi-worker all-reduce communication method used to keep variables in sync. A collective op is a single op in the TensorFlow graph which can automatically choose an all-reduce algorithm in the TensorFlow runtime according to hardware, network topology and tensor sizes. It also implements additional performance optimizations. For example, it includes a static optimization that converts multiple all-reductions on small tensors into fewer all-reductions on larger tensors. In addition, we are designing it to have a plugin architecture - so that in the future, users will be able to plugin algorithms that are better tuned for their hardware. Note that collective ops also implement other collective operations such as broadcast and all-gather. Here is the simplest way of creating `MultiWorkerMirroredStrategy`: ``` multiworker_strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy() ``` `MultiWorkerMirroredStrategy` currently allows you to choose between two different implementations of collective ops. `CollectiveCommunication.RING` implements ring-based collectives using gRPC as the communication layer. `CollectiveCommunication.NCCL` uses [Nvidia's NCCL](https://developer.nvidia.com/nccl) to implement collectives. `CollectiveCommunication.AUTO` defers the choice to the runtime. The best choice of collective implementation depends upon the number and kind of GPUs, and the network interconnect in the cluster. You can specify them like so: ``` multiworker_strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy( tf.distribute.experimental.CollectiveCommunication.NCCL) ``` One of the key differences to get multi worker training going, as compared to multi-GPU training, is the multi-worker setup. "TF_CONFIG" environment variable is the standard way in TensorFlow to specify the cluster configuration to each worker that is part of the cluster. See section on ["TF_CONFIG" below](#TF_CONFIG) for more details on how this can be done. Note: This strategy is [`experimental`](https://www.tensorflow.org/r1/guide/version_compat#what_is_not_covered) as we are currently improving it and making it work for more scenarios. As part of this, please expect the APIs to change in the future. ### TPUStrategy `tf.distribute.experimental.TPUStrategy` lets users run their TensorFlow training on Tensor Processing Units (TPUs). TPUs are Google's specialized ASICs designed to dramatically accelerate machine learning workloads. They are available on Google Colab, the [TensorFlow Research Cloud](https://www.tensorflow.org/tfrc) and [Google Compute Engine](https://cloud.google.com/tpu). In terms of distributed training architecture, TPUStrategy is the same `MirroredStrategy` - it implements synchronous distributed training. TPUs provide their own implementation of efficient all-reduce and other collective operations across multiple TPU cores, which are used in `TPUStrategy`. Here is how you would instantiate `TPUStrategy`. 
Note: To run this code in Colab, you should select TPU as the Colab runtime. See [Using TPUs]( tpu.ipynb) guide for a runnable version. ``` resolver = tf.distribute.cluster_resolver.TPUClusterResolver() tf.tpu.experimental.initialize_tpu_system(resolver) tpu_strategy = tf.distribute.experimental.TPUStrategy(resolver) ``` `TPUClusterResolver` instance helps locate the TPUs. In Colab, you don't need to specify any arguments to it. If you want to use this for Cloud TPUs, you will need to specify the name of your TPU resource in `tpu` argument. We also need to initialize the tpu system explicitly at the start of the program. This is required before TPUs can be used for computation and should ideally be done at the beginning because it also wipes out the TPU memory so all state will be lost. Note: This strategy is [`experimental`](https://www.tensorflow.org/r1/guide/version_compat#what_is_not_covered) as we are currently improving it and making it work for more scenarios. As part of this, please expect the APIs to change in the future. ### ParameterServerStrategy `tf.distribute.experimental.ParameterServerStrategy` supports parameter servers training on multiple machines. In this setup, some machines are designated as workers and some as parameter servers. Each variable of the model is placed on one parameter server. Computation is replicated across all GPUs of all the workers. In terms of code, it looks similar to other strategies: ``` ps_strategy = tf.distribute.experimental.ParameterServerStrategy() ``` For multi worker training, "TF_CONFIG" needs to specify the configuration of parameter servers and workers in your cluster, which you can read more about in [TF_CONFIG](#TF_CONFIG) below. So far we've talked about what are the different stategies available and how you can instantiate them. In the next few sections, we will talk about the different ways in which you can use them to distribute your training. We will show short code snippets in this guide and link off to full tutorials which you can run end to end. ## Using `tf.distribute.Strategy` with Keras We've integrated `tf.distribute.Strategy` into `tf.keras` which is TensorFlow's implementation of the [Keras API specification](https://keras.io). `tf.keras` is a high-level API to build and train models. By integrating into `tf.keras` backend, we've made it seamless for Keras users to distribute their training written in the Keras training framework. The only things that need to change in a user's program are: (1) Create an instance of the appropriate `tf.distribute.Strategy` and (2) Move the creation and compiling of Keras model inside `strategy.scope`. Here is a snippet of code to do this for a very simple Keras model with one dense layer: ``` mirrored_strategy = tf.distribute.MirroredStrategy() with mirrored_strategy.scope(): model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(1,))]) model.compile(loss='mse', optimizer='sgd') ``` In this example we used `MirroredStrategy` so we can run this on a machine with multiple GPUs. `strategy.scope()` indicated which parts of the code to run distributed. Creating a model inside this scope allows us to create mirrored variables instead of regular variables. Compiling under the scope allows us to know that the user intends to train this model using this strategy. Once this is setup, you can fit your model like you would normally. `MirroredStrategy` takes care of replicating the model's training on the available GPUs, aggregating gradients etc. 
```
dataset = tf.data.Dataset.from_tensors(([1.], [1.])).repeat(100).batch(10)
model.fit(dataset, epochs=2)
model.evaluate(dataset)
```
Here we used a `tf.data.Dataset` to provide the training and eval input. You can also use numpy arrays:
```
import numpy as np
inputs, targets = np.ones((100, 1)), np.ones((100, 1))
model.fit(inputs, targets, epochs=2, batch_size=10)
```
In both cases (dataset or numpy), each batch of the given input is divided equally among the multiple replicas. For instance, if using `MirroredStrategy` with 2 GPUs, each batch of size 10 will get divided among the 2 GPUs, with each receiving 5 input examples in each step. Each epoch will then train faster as you add more GPUs. Typically, you would want to increase your batch size as you add more accelerators, so as to make effective use of the extra computing power. You will also need to re-tune your learning rate, depending on the model. You can use `strategy.num_replicas_in_sync` to get the number of replicas.
```
# Compute global batch size using number of replicas.
BATCH_SIZE_PER_REPLICA = 5
global_batch_size = (BATCH_SIZE_PER_REPLICA *
                     mirrored_strategy.num_replicas_in_sync)
dataset = tf.data.Dataset.from_tensors(([1.], [1.])).repeat(100)
dataset = dataset.batch(global_batch_size)

LEARNING_RATES_BY_BATCH_SIZE = {5: 0.1, 10: 0.15}
learning_rate = LEARNING_RATES_BY_BATCH_SIZE[global_batch_size]
```
### What's supported now?

In the [TF nightly release](https://pypi.org/project/tf-nightly-gpu/), we now support training with Keras using all strategies.

Note: When using `TPUStrategy` with TPU pods with Keras, currently the user will have to explicitly shard or shuffle the data for different workers, but we will change this in the future to automatically shard the input data intelligently.

### Examples and Tutorials

Here is a list of tutorials and examples that illustrate the above integration end to end with Keras:

1. [Tutorial](../tutorials/distribute/keras.ipynb) to train MNIST with `MirroredStrategy`.
2. Official [ResNet50](https://github.com/tensorflow/models/blob/master/official/vision/image_classification/resnet_imagenet_main.py) training with ImageNet data using `MirroredStrategy`.
3. [ResNet50](https://github.com/tensorflow/tpu/blob/master/models/experimental/resnet50_keras/resnet50.py) trained with ImageNet data on Cloud TPUs with `TPUStrategy`.

## Using `tf.distribute.Strategy` with Estimator

`tf.estimator` is a distributed training TensorFlow API that originally supported the async parameter server approach. Like with Keras, we've integrated `tf.distribute.Strategy` into `tf.Estimator` so that a user who is using Estimator for their training can easily change how their training is distributed with very few changes to their code. With this, Estimator users can now do synchronous distributed training on multiple GPUs and multiple workers, as well as use TPUs. The usage of `tf.distribute.Strategy` with Estimator is slightly different from the Keras case. Instead of using `strategy.scope`, now we pass the strategy object into the [`RunConfig`](https://www.tensorflow.org/api_docs/python/tf/estimator/RunConfig) for the Estimator.
Here is a snippet of code that shows this with a premade estimator `LinearRegressor` and `MirroredStrategy`: ``` mirrored_strategy = tf.distribute.MirroredStrategy() config = tf.estimator.RunConfig( train_distribute=mirrored_strategy, eval_distribute=mirrored_strategy) regressor = tf.estimator.LinearRegressor( feature_columns=[tf.feature_column.numeric_column('feats')], optimizer='SGD', config=config) ``` We use a premade Estimator here, but the same code works with a custom Estimator as well. `train_distribute` determines how training will be distributed, and `eval_distribute` determines how evaluation will be distributed. This is another difference from Keras where we use the same strategy for both training and eval. Now we can train and evaluate this Estimator with an input function: ``` def input_fn(): dataset = tf.data.Dataset.from_tensors(({"feats":[1.]}, [1.])) return dataset.repeat(1000).batch(10) regressor.train(input_fn=input_fn, steps=10) regressor.evaluate(input_fn=input_fn, steps=10) ``` Another difference to highlight here between Estimator and Keras is the input handling. In Keras, we mentioned that each batch of the dataset is split across the multiple replicas. In Estimator, however, the user provides an `input_fn` and have full control over how they want their data to be distributed across workers and devices. We do not do automatic splitting of batch, nor automatically shard the data across different workers. The provided `input_fn` is called once per worker, thus giving one dataset per worker. Then one batch from that dataset is fed to one replica on that worker, thereby consuming N batches for N replicas on 1 worker. In other words, the dataset returned by the `input_fn` should provide batches of size `PER_REPLICA_BATCH_SIZE`. And the global batch size for a step can be obtained as `PER_REPLICA_BATCH_SIZE * strategy.num_replicas_in_sync`. When doing multi worker training, users will also want to either split their data across the workers, or shuffle with a random seed on each. You can see an example of how to do this in the [Multi-worker Training with Estimator](../tutorials/distribute/multi_worker_with_estimator.ipynb). We showed an example of using `MirroredStrategy` with Estimator. You can also use `TPUStrategy` with Estimator as well, in the exact same way: ``` config = tf.estimator.RunConfig( train_distribute=tpu_strategy, eval_distribute=tpu_strategy) ``` And similarly, you can use multi worker and parameter server strategies as well. The code remains the same, but you need to use `tf.estimator.train_and_evaluate`, and set "TF_CONFIG" environment variables for each binary running in your cluster. ### What's supported now? In TF nightly release, we support training with Estimator using all strategies. ### Examples and Tutorials Here are some examples that show end to end usage of various strategies with Estimator: 1. [End to end example](https://github.com/tensorflow/ecosystem/tree/master/distribution_strategy) for multi worker training in tensorflow/ecosystem using Kuberentes templates. This example starts with a Keras model and converts it to an Estimator using the `tf.keras.estimator.model_to_estimator` API. 2. Official [ResNet50](https://github.com/tensorflow/models/blob/master/official/r1/resnet/imagenet_main.py) model, which can be trained using either `MirroredStrategy` or `MultiWorkerMirroredStrategy`. 3. [ResNet50](https://github.com/tensorflow/tpu/blob/master/models/experimental/distribution_strategy/resnet_estimator.py) example with TPUStrategy. 
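Picking up the `input_fn` discussion above, here is a minimal sketch of an input function that shards data per worker and batches at `PER_REPLICA_BATCH_SIZE`. The feature name, toy dataset and batch sizes are illustrative, and it relies on the optional `input_context` argument that `tf.distribute` passes to input functions when a strategy is in use.
```
GLOBAL_BATCH_SIZE = 64

def sharded_input_fn(input_context=None):
  dataset = tf.data.Dataset.from_tensors(({"feats": [1.]}, [1.])).repeat(1000)
  batch_size = GLOBAL_BATCH_SIZE
  if input_context is not None:
    # One input pipeline per worker: keep only this worker's shard of the data
    # and feed each replica PER_REPLICA_BATCH_SIZE examples per step.
    dataset = dataset.shard(input_context.num_input_pipelines,
                            input_context.input_pipeline_id)
    batch_size = input_context.get_per_replica_batch_size(GLOBAL_BATCH_SIZE)
  return dataset.batch(batch_size)

regressor.train(input_fn=sharded_input_fn, steps=10)
```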
## Using `tf.distribute.Strategy` with custom training loops

As you've seen, using `tf.distribute.Strategy` with the high-level APIs is only a couple of lines of code change. With a little more effort, `tf.distribute.Strategy` can also be used by other users who are not using these frameworks.

TensorFlow is used for a wide variety of use cases and some users (such as researchers) require more flexibility and control over their training loops. This makes it hard for them to use the high-level frameworks such as Estimator or Keras. For instance, someone using a GAN may want to take a different number of generator or discriminator steps each round. Similarly, the high-level frameworks are not very suitable for Reinforcement Learning training. So these users will usually write their own training loops.

For these users, we provide a core set of methods through the `tf.distribute.Strategy` classes. Using these may require minor restructuring of the code initially, but once that is done, the user should be able to switch between GPUs / TPUs / multiple machines by just changing the strategy instance.

Here we will show a brief snippet illustrating this use case for a simple training example using the same Keras model as before.

Note: These APIs are still experimental and we are improving them to make them more user friendly.

First, we create the model and optimizer inside the strategy's scope. This ensures that any variables created with the model and optimizer are mirrored variables.
```
with mirrored_strategy.scope():
  model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(1,))])
  optimizer = tf.train.GradientDescentOptimizer(0.1)
```
Next, we create the input dataset and call `tf.distribute.Strategy.experimental_distribute_dataset` to distribute the dataset based on the strategy.
```
dataset = tf.data.Dataset.from_tensors(([1.], [1.])).repeat(1000).batch(
    global_batch_size)
dist_dataset = mirrored_strategy.experimental_distribute_dataset(dataset)
```
Then, we define one step of the training. The optimizer is used to compute and apply the gradients that update our model's variables. To distribute this training step, we put it in a function `step_fn` and pass it to `tf.distribute.Strategy.run` along with the inputs from the iterator:
```
def train_step(dist_inputs):
  def step_fn(inputs):
    features, labels = inputs

    logits = model(features)
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
        logits=logits, labels=labels)
    loss = tf.reduce_sum(cross_entropy) * (1.0 / global_batch_size)
    train_op = optimizer.minimize(loss)
    with tf.control_dependencies([train_op]):
      return tf.identity(loss)

  per_replica_losses = mirrored_strategy.run(
      step_fn, args=(dist_inputs,))
  mean_loss = mirrored_strategy.reduce(
      tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)
  return mean_loss
```
A few other things to note in the code above:

1. We used `tf.nn.softmax_cross_entropy_with_logits` to compute the loss, and then scaled the total loss by the global batch size. This is important because all the replicas are training in sync and the number of examples in each step of training is the global batch. So the loss needs to be divided by the global batch size and not by the replica (local) batch size.
2. We used the `strategy.reduce` API to aggregate the results returned by `tf.distribute.Strategy.run`. `tf.distribute.Strategy.run` returns results from each local replica in the strategy, and there are multiple ways to consume this result. You can `reduce` them to get an aggregated value.
You can also do `tf.distribute.Strategy.experimental_local_results(results)`to get the list of values contained in the result, one per local replica. Finally, once we have defined the training step, we can initialize the iterator and variables and run the training in a loop: ``` with mirrored_strategy.scope(): input_iterator = dist_dataset.make_initializable_iterator() iterator_init = input_iterator.initializer var_init = tf.global_variables_initializer() loss = train_step(input_iterator.get_next()) with tf.Session() as sess: sess.run([var_init, iterator_init]) for _ in range(10): print(sess.run(loss)) ``` In the example above, we used `tf.distribute.Strategy.experimental_distribute_dataset` to provide input to your training. We also provide the `tf.distribute.Strategy.make_experimental_numpy_dataset` to support numpy inputs. You can use this API to create a dataset before calling `tf.distribute.Strategy.experimental_distribute_dataset`. This covers the simplest case of using `tf.distribute.Strategy` API to distribute custom training loops. We are in the process of improving these APIs. Since this use case requires more work on the part of the user, we will be publishing a separate detailed guide in the future. ### What's supported now? In TF nightly release, we support training with custom training loops using `MirroredStrategy` and `TPUStrategy` as shown above. Support for other strategies will be coming in soon. `MultiWorkerMirorredStrategy` support will be coming in the future. ### Examples and Tutorials Here are some examples for using distribution strategy with custom training loops: 1. [Example](https://github.com/tensorflow/tensorflow/blob/5456cc28f3f8d9c17c645d9a409e495969e584ae/tensorflow/contrib/distribute/python/examples/mnist_tf1_tpu.py) to train MNIST using `TPUStrategy`. ## Other topics In this section, we will cover some topics that are relevant to multiple use cases. <a id="TF_CONFIG"> ### Setting up TF\_CONFIG environment variable </a> For multi-worker training, as mentioned before, you need to set "TF\_CONFIG" environment variable for each binary running in your cluster. The "TF\_CONFIG" environment variable is a JSON string which specifies what tasks constitute a cluster, their addresses and each task's role in the cluster. We provide a Kubernetes template in the [tensorflow/ecosystem](https://github.com/tensorflow/ecosystem) repo which sets "TF\_CONFIG" for your training tasks. One example of "TF\_CONFIG" is: ``` os.environ["TF_CONFIG"] = json.dumps({ "cluster": { "worker": ["host1:port", "host2:port", "host3:port"], "ps": ["host4:port", "host5:port"] }, "task": {"type": "worker", "index": 1} }) ``` This "TF\_CONFIG" specifies that there are three workers and two ps tasks in the cluster along with their hosts and ports. The "task" part specifies that the role of the current task in the cluster, worker 1 (the second worker). Valid roles in a cluster is "chief", "worker", "ps" and "evaluator". There should be no "ps" job except when using `tf.distribute.experimental.ParameterServerStrategy`. ## What's next? `tf.distribute.Strategy` is actively under development. We welcome you to try it out and provide your feedback via [issues on GitHub](https://github.com/tensorflow/tensorflow/issues/new).
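To make the "TF_CONFIG" setup above concrete, here is a minimal sketch of what each of two workers would set before launching the same training binary; the host names and ports are hypothetical, and only the task index differs between the two machines.
```
import json
import os

cluster = {"worker": ["worker0.example.com:2222", "worker1.example.com:2222"]}

# On the first worker:
os.environ["TF_CONFIG"] = json.dumps(
    {"cluster": cluster, "task": {"type": "worker", "index": 0}})

# On the second worker, the only change is the index:
# os.environ["TF_CONFIG"] = json.dumps(
#     {"cluster": cluster, "task": {"type": "worker", "index": 1}})
```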
github_jupyter
``` from __future__ import absolute_import, division, print_function, unicode_literals import tensorflow as tf from tensorflow import keras from os import listdir, path import numpy as np from collections import defaultdict import datetime import random random.seed(42) # Keep the order stable everytime shuffling the files while creating training datasets ``` ## Global variables ``` seq_length = 36 # This will be used to keep the fixed input size for the first CNN layer dim = 6 # Number of datapoints in a single reading accX,accY,accZ,gyrX,gyrY,gyrZ num_classes = 10 # Number of output classes [0,9] ``` ## Sequence Padding #### When collecting sequence data, individual samples have different lengths. Since the input data for a convolutional neural network must be a single tensor, samples need to be padded. The sequence are padded at the beginning and at the end with neighboring values. ``` def padding(data): padded_data = [] noise_level = [ 20, 20, 20, 0.2, 0.2, 0.2 ] tmp_data = (np.random.rand(seq_length, dim) - 0.5) * noise_level + data[0] tmp_data[(seq_length - min(len(data), seq_length)):] = data[:min(len(data), seq_length)] padded_data.append(tmp_data) tmp_data = (np.random.rand(seq_length, dim) - 0.5) * noise_level + data[-1] tmp_data[:min(len(data), seq_length)] = data[:min(len(data), seq_length)] padded_data.append(tmp_data) return padded_data ``` ## Convert to TensorFlow dataset, keeps data and labels together ``` def build_dataset(data, label): # Add 2 padding, initialize data and label padded_num = 2 length = len(data) * padded_num features = np.zeros((length, seq_length, dim)) labels = np.zeros(length) # Get padding for train, valid and test for idx, (data, label) in enumerate(zip(data, label)): padded_data = padding(data) for num in range(padded_num): features[padded_num * idx + num] = padded_data[num] labels[padded_num * idx + num] = label # Turn into tf.data.Dataset dataset = tf.data.Dataset.from_tensor_slices((features, labels.astype("int32"))) return length, dataset ``` ## Time Warping ``` def time_warping(molecule, denominator, data): tmp_data = [[0 for i in range(len(data[0]))] for j in range((int(len(data) / molecule) - 1) * denominator)] for i in range(int(len(data) / molecule) - 1): for j in range(len(data[i])): for k in range(denominator): tmp_data[denominator * i + k][j] = (data[molecule * i + k][j] * (denominator - k) + data[molecule * i + k + 1][j] * k) / denominator return tmp_data ``` ## Data augmentation ``` def augment_data(original_data, original_label): new_data = [] new_label = [] for idx, (data, label) in enumerate(zip(original_data, original_label)): # pylint: disable=unused-variable # Original data new_data.append(data) new_label.append(label) # Shift Sequence for num in range(5): # pylint: disable=unused-variable new_data.append((np.array(data, dtype=np.float32) + (random.random() - 0.5) * 200).tolist()) new_label.append(label) # Add Random noise tmp_data = [[0 for i in range(len(data[0]))] for j in range(len(data))] for num in range(5): for i in range(len(tmp_data)): for j in range(len(tmp_data[i])): tmp_data[i][j] = data[i][j] + 5 * random.random() new_data.append(tmp_data) new_label.append(label) # Time warping fractions = [(3, 2), (5, 3), (2, 3), (3, 4), (9, 5), (6, 5), (4, 5)] for molecule, denominator in fractions: new_data.append(time_warping(molecule, denominator, data)) new_label.append(label) # Movement amplification for molecule, denominator in fractions: new_data.append( (np.array(data, dtype=np.float32) * molecule / denominator).tolist()) 
new_label.append(label) return new_data, new_label ``` ## Load data from files ``` def load_data(data_type, files): data = [] labels = [] random.shuffle(files) for file in files: with open(file) as f: label = path.splitext(file)[0][-1] labels.append(label) readings = [] for line in f: reading = line.strip().split(',') readings.append([float(i) for i in reading[0:6]]) data.append(readings) if data_type == 'train': data, labels = augment_data(data, labels) return build_dataset(data, labels) ``` ## Prepare training, validation, and test datasets ``` files_path = defaultdict(list) dir = './data' for filename in listdir(dir): if filename.endswith('.csv'): digit = path.splitext(filename)[0][-1] files_path[digit].append(path.join(dir, filename)) train_files = [] validation_files = [] test_files = [] for digit in files_path: random.shuffle(files_path[digit]) train_split = int(len(files_path[digit]) * 0.6) # 60% validation_split = train_split + int(len(files_path[digit]) * 0.2) # 20% train_files += files_path[digit][:train_split] validation_files += files_path[digit][train_split:validation_split] # remaining 20% test_files += files_path[digit][validation_split:] train_length, train_data = load_data('train', train_files) validation_length, validation_data = load_data('validation', validation_files) test_length, test_data = load_data('test', test_files ) print('train_length={} validation_length={} test_length{}'.format(train_length, validation_length, test_length)) ``` ## Build a sequential model ``` model = tf.keras.Sequential([ tf.keras.layers.Conv2D(8, (3, 3), padding="same", activation="relu", input_shape=(seq_length, dim, 1)), tf.keras.layers.Conv2D(8, (3, 3), padding="same", activation="relu"), tf.keras.layers.MaxPool2D((2, 2)), tf.keras.layers.Dropout(0.1), tf.keras.layers.Conv2D(8, (3, 3), padding="same", activation="relu"), tf.keras.layers.MaxPool2D((2, 2), padding="same"), tf.keras.layers.Dropout(0.1), tf.keras.layers.Conv2D(16, (3, 3), padding="same", activation="relu"), tf.keras.layers.MaxPool2D((2, 2), padding="same"), tf.keras.layers.Dropout(0.1), tf.keras.layers.Conv2D(16, (3, 3), padding="same", activation="relu"), tf.keras.layers.Flatten(), tf.keras.layers.Dense(64, activation="relu"), tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(32, activation="relu"), tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(num_classes, activation="softmax") ]) model.summary() ``` ## Compile and start training ``` epochs = 100 batch_size = 64 steps_per_epoch=1000 model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]) def reshape_function(data, label): reshaped_data = tf.reshape(data, [-1, dim, 1]) return reshaped_data, label train_data = train_data.map(reshape_function) validation_data = validation_data.map(reshape_function) train_data = train_data.batch(batch_size).repeat() validation_data = validation_data.batch(batch_size) logdir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir) # Uncomment the ines below if you like to see how training proceeds # %load_ext tensorboard # %tensorboard --logdir logdir model.fit( train_data, epochs=epochs, validation_data=validation_data, steps_per_epoch=steps_per_epoch, validation_steps=int((validation_length - 1) / batch_size + 1), callbacks=[tensorboard_callback]) ``` ## Evaluate the trained model on test dataset ``` test_data = test_data.map(reshape_function) test_labels = np.zeros(test_length) # There is no easy function to get the 
labels back from the tf.data.Dataset :( # Need to iterate over dataset idx = 0 for data, label in test_data: test_labels[idx] = label.numpy() idx += 1 test_data = test_data.batch(batch_size) loss, acc = model.evaluate(test_data) pred = np.argmax(model.predict(test_data), axis=1) # Create a confusion matrix to see how model predicts confusion = tf.math.confusion_matrix(labels=tf.constant(test_labels), predictions=tf.constant(pred), num_classes=num_classes) print(confusion) ``` ## Convert model to TFLite format ### Note: Currently quantized TFLite format does not work with TFLite Micro library ``` converter = tf.lite.TFLiteConverter.from_keras_model(model) tflite_model = converter.convert() open("model.tflite", "wb").write(tflite_model) # Convert the model to the TensorFlow Lite format with quantization converter = tf.lite.TFLiteConverter.from_keras_model(model) converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE] tflite_model = converter.convert() open("model_quantized.tflite", "wb").write(tflite_model) ```
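As a quick smoke test (not part of the original notebook), the converted flatbuffer can be loaded with the TFLite Python interpreter and run on a random input of the expected shape. This only confirms that the model loads and emits a 10-way probability vector, not that it is accurate.
```
# Load the converted model and run one random input through it.
interpreter = tf.lite.Interpreter(model_path="model.tflite")
interpreter.allocate_tensors()

input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Build a random tensor matching the expected input shape, e.g. [1, seq_length, dim, 1].
dummy_input = np.random.rand(*input_details[0]['shape']).astype(np.float32)
interpreter.set_tensor(input_details[0]['index'], dummy_input)
interpreter.invoke()

probabilities = interpreter.get_tensor(output_details[0]['index'])
print("output shape:", probabilities.shape, "predicted class:", np.argmax(probabilities))
```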
github_jupyter
<h1>Table of Contents<span class="tocSkip"></span></h1> <div class="toc"><ul class="toc-item"><li><span><a href="#Introduction" data-toc-modified-id="Introduction-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Introduction</a></span><ul class="toc-item"><li><span><a href="#Example-01:-Extract-text" data-toc-modified-id="Example-01:-Extract-text-1.1"><span class="toc-item-num">1.1&nbsp;&nbsp;</span>Example 01: Extract text</a></span><ul class="toc-item"><li><span><a href="#Write-the-code-for-the-main-steps-aiming-web-scraping" data-toc-modified-id="Write-the-code-for-the-main-steps-aiming-web-scraping-1.1.1"><span class="toc-item-num">1.1.1&nbsp;&nbsp;</span>Write the code for the main steps aiming web scraping</a></span><ul class="toc-item"><li><span><a href="#Send-request-and-catch-response" data-toc-modified-id="Send-request-and-catch-response-1.1.1.1"><span class="toc-item-num">1.1.1.1&nbsp;&nbsp;</span>Send request and catch response</a></span></li><li><span><a href="#get-the-content-of-the-response" data-toc-modified-id="get-the-content-of-the-response-1.1.1.2"><span class="toc-item-num">1.1.1.2&nbsp;&nbsp;</span>get the content of the response</a></span></li><li><span><a href="#parse-webpage" data-toc-modified-id="parse-webpage-1.1.1.3"><span class="toc-item-num">1.1.1.3&nbsp;&nbsp;</span>parse webpage</a></span></li><li><span><a href="#Extra:-Use-prettify-to-have-a-'prettier'-view-of-the-page's-code" data-toc-modified-id="Extra:-Use-prettify-to-have-a-'prettier'-view-of-the-page's-code-1.1.1.4"><span class="toc-item-num">1.1.1.4&nbsp;&nbsp;</span>Extra: Use prettify to have a 'prettier' view of the page's code</a></span></li></ul></li><li><span><a href="#Title?" data-toc-modified-id="Title?-1.1.2"><span class="toc-item-num">1.1.2&nbsp;&nbsp;</span>Title?</a></span></li><li><span><a href="#Text-per-section-(e.g.-1.-What-is-cryptocurrency?)" data-toc-modified-id="Text-per-section-(e.g.-1.-What-is-cryptocurrency?)-1.1.3"><span class="toc-item-num">1.1.3&nbsp;&nbsp;</span>Text per section (e.g. 1. What is cryptocurrency?)</a></span></li></ul></li><li><span><a href="#Example-02:-Extract-table-info" data-toc-modified-id="Example-02:-Extract-table-info-1.2"><span class="toc-item-num">1.2&nbsp;&nbsp;</span>Example 02: Extract table info</a></span></li><li><span><a href="#Example-03:-Extract-information-from-hyperlink" data-toc-modified-id="Example-03:-Extract-information-from-hyperlink-1.3"><span class="toc-item-num">1.3&nbsp;&nbsp;</span>Example 03: Extract information from hyperlink</a></span></li></ul></li></ul></div> # Introduction ``` # importing packages import requests from bs4 import BeautifulSoup import pandas as pd ``` ## Example 01: Extract text ``` url_01 = "https://www.nerdwallet.com/article/investing/cryptocurrency-7-things-to-know#:~:text=A%20cryptocurrency%20(or%20%E2%80%9Ccrypto%E2%80%9D,sell%20or%20trade%20them%20securely." ``` ### Write the code for the main steps aiming web scraping #### Send request and catch response ``` # response = ``` #### get the content of the response ``` # content = ``` #### parse webpage ``` # parser = ``` #### Extra: Use prettify to have a 'prettier' view of the page's code `parser` is a `BeautifulSoup object`, which represents the document as a nested data structure. The `prettify()` method will turn a Beautiful Soup parse tree into a nicely formatted Unicode string, making it much easy to visualize the tree structure. 
``` def parse_website(url): """ Parse content of a website Args: url (str): url of the website of which we want to acess the content Return: parser: representation of the document as a nested data structure. """ # Send request and catch response response = requests.get(url) # get the content of the response content = response.content # parse webpage parser = BeautifulSoup(content, "lxml") return parser parser_01 = parse_website(url_01) ``` ### Title? ``` # access title of the web page #obtain text between tags ``` ### Text per section (e.g. 1. What is cryptocurrency?) 1. Access subtitles (titles of sections e.g. "Cryptocurrency definition") ![](../images/crypto_currency_section.png) ``` # subtitles = # texts = # text_01 = texts[0:6] # text_01 ``` Apply some cleaning to the piece of text bellow if you have time... ``` # text_01 = text_01[0:4] ``` ## Example 02: Extract table info ``` url_02 = "https://www.worldometers.info/population/countries-in-the-eu-by-population/" parser_02 = parse_website(url_02) print(parser_02.prettify()) ``` ![](../images/population_EU.png) ``` # Obtain information from tag <table> # table = # table # tip: prettify table to see better the information you are looking for # Obtain column names within tag <th> with attribute col # list_col = # Clean text # list_col = # list_col # Create a dataframe # EU_population_data = ``` From the table prettify we see that the rows are located under tag <tr> and items are located under tag <td> . Use this info and fill your dataframe. ``` # Create a for loop to fill EU_population_data # EU_population_data ``` ## Example 03: Extract information from hyperlink Applying web scraping to [`https://jadsmkbdatalab.nl/voorbeeldcases/`](https://jadsmkbdatalab.nl/voorbeeldcases/). Right click to `inspect` element in the webpage. Notice that the information we look for is between h3's... ![](../images/mkb_inspect_page.PNG) In this example, you will face something new. Before doing as usual and using the parser function check the response using requests. Which code did you get? TIP: Check this [stackoverflow answer](https://stackoverflow.com/questions/38489386/python-requests-403-forbidden) ``` url_03 = 'https://jadsmkbdatalab.nl/voorbeeldcases/' # response = # response # Modify the steps we have learn to solve the issue # get response # get the content of the response # parse webpage # print(parser_03.prettify()) # find hyperlinks # links = # links # Obtain url of the links ``` Updating function to include headers... ``` def parse_website(url): """ Parse content of a website Args: url (str): url of the website of which we want to acess the content Return: parser: representation of the document as a nested data structure. """ # Send request and catch response headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"} response = requests.get(url, headers=headers) # get the content of the response content = response.content # parse webpage parser = BeautifulSoup(content, "lxml") return parser # parse and prettify one of the obtained urls # parser_03_0 = # find all paragraphs within parser_03_0 # paragraphs = # paragraphs # Obtain text of paragraphs # saving the content of this page ```
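Looping back to Example 02, one possible way to fill `EU_population_data` (assuming the `table` and `list_col` objects created in those cells) is to walk the `<tr>` rows and collect their `<td>` cells, as sketched below.
```
# Sketch of the fill loop for Example 02; assumes `table` and `list_col` from the cells above.
records = []
for row in table.find_all("tr")[1:]:  # skip the header row
    cells = [td.get_text(strip=True) for td in row.find_all("td")]
    if cells:  # ignore empty/spacer rows
        records.append(cells)

EU_population_data = pd.DataFrame(records, columns=list_col)
EU_population_data.head()
```
`pandas.read_html(url_02)` is a handy shortcut for simple tables, but it skips the BeautifulSoup practice this exercise is about.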
github_jupyter
# **Neural machine translation with attention** Today we will train a sequence to sequence (seq2seq) model for Spanish to English translation. This is an advanced example that assumes some knowledge of sequence to sequence models. After training the model in this notebook, you will be able to input a Spanish sentence, such as *"¿todavia estan en casa?"*, and return the English translation: *"are you still at home?"* The translation quality is reasonable for a toy example, but the generated attention plot is perhaps more interesting. This shows which parts of the input sentence has the model's attention while translating: <img src="https://tensorflow.org/images/spanish-english.png" alt="spanish-english attention plot"> Note: This example takes approximately 10 minutes to run on a single P100 GPU. ``` import tensorflow as tf import matplotlib.pyplot as plt import matplotlib.ticker as ticker from sklearn.model_selection import train_test_split import unicodedata import re import numpy as np import os import io import time ``` ## **Download and prepare the dataset** We'll use a language dataset provided by http://www.manythings.org/anki/. This dataset contains language translation pairs in the format: ``` May I borrow this book? ¿Puedo tomar prestado este libro? ``` There are a variety of languages available, but we'll use the English-Spanish dataset. For convenience, we've hosted a copy of this dataset on Google Cloud, but you can also download your own copy. After downloading the dataset, here are the steps we'll take to prepare the data: 1. Add a *start* and *end* token to each sentence. 2. Clean the sentences by removing special characters. 3. Create a word index and reverse word index (dictionaries mapping from word → id and id → word). 4. Pad each sentence to a maximum length. ``` # Download the file path_to_zip = tf.keras.utils.get_file( 'spa-eng.zip', origin='http://storage.googleapis.com/download.tensorflow.org/data/spa-eng.zip', extract=True) path_to_file = os.path.dirname(path_to_zip)+"/spa-eng/spa.txt" # Converts the unicode file to ascii def unicode_to_ascii(s): return ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn') def preprocess_sentence(w): w = unicode_to_ascii(w.lower().strip()) # creating a space between a word and the punctuation following it # eg: "he is a boy." => "he is a boy ." # Reference:- https://stackoverflow.com/questions/3645931/python-padding-punctuation-with-white-spaces-keeping-punctuation w = re.sub(r"([?.!,¿])", r" \1 ", w) w = re.sub(r'[" "]+', " ", w) # replacing everything with space except (a-z, A-Z, ".", "?", "!", ",") w = re.sub(r"[^a-zA-Z?.!,¿]+", " ", w) w = w.strip() # adding a start and an end token to the sentence # so that the model know when to start and stop predicting. w = '<start> ' + w + ' <end>' return w en_sentence = u"May I borrow this book?" sp_sentence = u"¿Puedo tomar prestado este libro?" print(preprocess_sentence(en_sentence)) print(preprocess_sentence(sp_sentence).encode('utf-8')) # 1. Remove the accents # 2. Clean the sentences # 3. 
Return word pairs in the format: [ENGLISH, SPANISH] def create_dataset(path, num_examples): lines = io.open(path, encoding='UTF-8').read().strip().split('\n') word_pairs = [[preprocess_sentence(w) for w in l.split('\t')] for l in lines[:num_examples]] return zip(*word_pairs) en, sp = create_dataset(path_to_file, None) print(en[-1]) print(sp[-1]) def tokenize(lang): lang_tokenizer = tf.keras.preprocessing.text.Tokenizer(filters='') lang_tokenizer.fit_on_texts(lang) tensor = lang_tokenizer.texts_to_sequences(lang) tensor = tf.keras.preprocessing.sequence.pad_sequences(tensor, padding='post') return tensor, lang_tokenizer def load_dataset(path, num_examples=None): # creating cleaned input, output pairs targ_lang, inp_lang = create_dataset(path, num_examples) input_tensor, inp_lang_tokenizer = tokenize(inp_lang) target_tensor, targ_lang_tokenizer = tokenize(targ_lang) return input_tensor, target_tensor, inp_lang_tokenizer, targ_lang_tokenizer ``` ### **Limit the size of the dataset to experiment faster (optional)** Training on the complete dataset of >100,000 sentences will take a long time. To train faster, we can limit the size of the dataset to 30,000 sentences (of course, translation quality degrades with less data): ``` # Try experimenting with the size of that dataset num_examples = 100000 input_tensor, target_tensor, inp_lang, targ_lang = load_dataset(path_to_file, num_examples) # Calculate max_length of the target tensors max_length_targ, max_length_inp = target_tensor.shape[1], input_tensor.shape[1] # Creating training and validation sets using an 80-20 split input_tensor_train, input_tensor_val, target_tensor_train, target_tensor_val = train_test_split(input_tensor, target_tensor, test_size=0.2) # Show length print(len(input_tensor_train), len(target_tensor_train), len(input_tensor_val), len(target_tensor_val)) def convert(lang, tensor): for t in tensor: if t!=0: print ("%d ----> %s" % (t, lang.index_word[t])) print ("Input Language; index to word mapping") convert(inp_lang, input_tensor_train[0]) print () print ("Target Language; index to word mapping") convert(targ_lang, target_tensor_train[0]) ``` ### **Create a tf.data dataset** ``` BUFFER_SIZE = len(input_tensor_train) BATCH_SIZE = 64 steps_per_epoch = len(input_tensor_train)//BATCH_SIZE embedding_dim = 256 units = 1024 # better if embedding_dim*4 vocab_inp_size = len(inp_lang.word_index)+1 vocab_tar_size = len(targ_lang.word_index)+1 dataset = tf.data.Dataset.from_tensor_slices((input_tensor_train, target_tensor_train)).shuffle(BUFFER_SIZE) dataset = dataset.batch(BATCH_SIZE, drop_remainder=True) example_input_batch, example_target_batch = next(iter(dataset)) example_input_batch.shape, example_target_batch.shape ``` ## **Write the encoder and decoder model** Implement an encoder-decoder model with attention which you can read about in the TensorFlow [Neural Machine Translation (seq2seq) tutorial](https://github.com/tensorflow/nmt). This example uses a more recent set of APIs. This notebook implements the [attention equations](https://github.com/tensorflow/nmt#background-on-the-attention-mechanism) from the seq2seq tutorial. The following diagram shows that each input words is assigned a weight by the attention mechanism which is then used by the decoder to predict the next word in the sentence. The below picture and formulas are an example of attention mechanism from [Luong's paper](https://arxiv.org/abs/1508.04025v5). 
<img src="https://www.tensorflow.org/images/seq2seq/attention_mechanism.jpg" width="500" alt="attention mechanism"> The input is put through an encoder model which gives us the encoder output of shape *(batch_size, max_length, hidden_size)* and the encoder hidden state of shape *(batch_size, hidden_size)*. Here are the equations that are implemented: <img src="https://www.tensorflow.org/images/seq2seq/attention_equation_0.jpg" alt="attention equation 0" width="800"> <img src="https://www.tensorflow.org/images/seq2seq/attention_equation_1.jpg" alt="attention equation 1" width="800"> This tutorial uses [Bahdanau attention](https://arxiv.org/pdf/1409.0473.pdf) for the encoder. Let's decide on notation before writing the simplified form: * FC = Fully connected (dense) layer * EO = Encoder output * H = hidden state * X = input to the decoder And the pseudo-code: * `score = FC(tanh(FC(EO) + FC(H)))` * `attention weights = softmax(score, axis = 1)`. Softmax by default is applied on the last axis but here we want to apply it on the *1st axis*, since the shape of score is *(batch_size, max_length, hidden_size)*. `Max_length` is the length of our input. Since we are trying to assign a weight to each input, softmax should be applied on that axis. * `context vector = sum(attention weights * EO, axis = 1)`. Same reason as above for choosing axis as 1. * `embedding output` = The input to the decoder X is passed through an embedding layer. * `merged vector = concat(embedding output, context vector)` * This merged vector is then given to the GRU The shapes of all the vectors at each step have been specified in the comments in the code: ``` class Encoder(tf.keras.Model): def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz): super(Encoder, self).__init__() self.batch_sz = batch_sz self.enc_units = enc_units self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim) self.gru = tf.keras.layers.GRU(self.enc_units, return_sequences=True, return_state=True, recurrent_initializer='glorot_uniform') def call(self, x, hidden): x = self.embedding(x) output, state = self.gru(x, initial_state = hidden) return output, state def initialize_hidden_state(self): return tf.zeros((self.batch_sz, self.enc_units)) encoder = Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE) # sample input sample_hidden = encoder.initialize_hidden_state() sample_output, sample_hidden = encoder(example_input_batch, sample_hidden) print ('Encoder output shape: (batch size, sequence length, units) {}'.format(sample_output.shape)) print ('Encoder Hidden state shape: (batch size, units) {}'.format(sample_hidden.shape)) class BahdanauAttention(tf.keras.layers.Layer): def __init__(self, units): super(BahdanauAttention, self).__init__() self.W1 = tf.keras.layers.Dense(units) self.W2 = tf.keras.layers.Dense(units) self.V = tf.keras.layers.Dense(1) def call(self, query, values): # query hidden state shape == (batch_size, hidden size) # query_with_time_axis shape == (batch_size, 1, hidden size) # values shape == (batch_size, max_len, hidden size) # we are doing this to broadcast addition along the time axis to calculate the score query_with_time_axis = tf.expand_dims(query, 1) # score shape == (batch_size, max_length, 1) # we get 1 at the last axis because we are applying score to self.V # the shape of the tensor before applying self.V is (batch_size, max_length, units) score = self.V(tf.nn.tanh( self.W1(query_with_time_axis) + self.W2(values))) # attention_weights shape == (batch_size, max_length, 1) attention_weights = 
tf.nn.softmax(score, axis=1) # context_vector shape after sum == (batch_size, hidden_size) context_vector = attention_weights * values context_vector = tf.reduce_sum(context_vector, axis=1) return context_vector, attention_weights attention_layer = BahdanauAttention(10) attention_result, attention_weights = attention_layer(sample_hidden, sample_output) print("Attention result shape: (batch size, units) {}".format(attention_result.shape)) print("Attention weights shape: (batch_size, sequence_length, 1) {}".format(attention_weights.shape)) class Decoder(tf.keras.Model): def __init__(self, vocab_size, embedding_dim, dec_units, batch_sz): super(Decoder, self).__init__() self.batch_sz = batch_sz self.dec_units = dec_units self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim) self.gru = tf.keras.layers.GRU(self.dec_units, return_sequences=True, return_state=True, recurrent_initializer='glorot_uniform') self.fc = tf.keras.layers.Dense(vocab_size) # used for attention self.attention = BahdanauAttention(self.dec_units) def call(self, x, hidden, enc_output): # enc_output shape == (batch_size, max_length, hidden_size) context_vector, attention_weights = self.attention(hidden, enc_output) # x shape after passing through embedding == (batch_size, 1, embedding_dim) x = self.embedding(x) # x shape after concatenation == (batch_size, 1, embedding_dim + hidden_size) x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1) # passing the concatenated vector to the GRU output, state = self.gru(x) # output shape == (batch_size * 1, hidden_size) output = tf.reshape(output, (-1, output.shape[2])) # output shape == (batch_size, vocab) x = self.fc(output) return x, state, attention_weights decoder = Decoder(vocab_tar_size, embedding_dim, units, BATCH_SIZE) sample_decoder_output, _, _ = decoder(tf.random.uniform((BATCH_SIZE, 1)), sample_hidden, sample_output) print ('Decoder output shape: (batch_size, vocab size) {}'.format(sample_decoder_output.shape)) ``` ## **Define the optimizer and the loss function** ``` optimizer = tf.keras.optimizers.Adam() loss_object = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction='none') def loss_function(real, pred): mask = tf.math.logical_not(tf.math.equal(real, 0)) loss_ = loss_object(real, pred) mask = tf.cast(mask, dtype=loss_.dtype) loss_ *= mask return tf.reduce_mean(loss_) ``` ## **Checkpoints (Object-based saving)** ``` checkpoint_dir = './training_checkpoints' checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt") checkpoint = tf.train.Checkpoint(optimizer=optimizer, encoder=encoder, decoder=decoder) ``` ## **Training** 1. Pass the *input* through the *encoder* which return *encoder output* and the *encoder hidden state*. 2. The encoder output, encoder hidden state and the decoder input (which is the *start token*) is passed to the decoder. 3. The decoder returns the *predictions* and the *decoder hidden state*. 4. The decoder hidden state is then passed back into the model and the predictions are used to calculate the loss. 5. Use *teacher forcing* to decide the next input to the decoder. 6. *Teacher forcing* is the technique where the *target word* is passed as the *next input* to the decoder. 7. The final step is to calculate the gradients and apply it to the optimizer and backpropagate. 
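For reference, the masked loss implemented in `loss_function` above corresponds, for a single decoder time step, to

$$\mathcal{L}_t = \frac{1}{B}\sum_{i=1}^{B} m_{i,t}\,\mathrm{CE}\big(y_{i,t}, \hat{y}_{i,t}\big), \qquad m_{i,t} = \begin{cases} 0 & \text{if } y_{i,t} = 0 \text{ (padding)}\\ 1 & \text{otherwise,} \end{cases}$$

where $\mathrm{CE}$ is the sparse categorical cross-entropy computed from the logits and $B$ is the batch size; padded positions contribute zero but are still counted in the mean. The training step below sums these per-step losses and divides by the target sequence length.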
``` @tf.function def train_step(inp, targ, enc_hidden): loss = 0 with tf.GradientTape() as tape: enc_output, enc_hidden = encoder(inp, enc_hidden) dec_hidden = enc_hidden dec_input = tf.expand_dims([targ_lang.word_index['<start>']] * BATCH_SIZE, 1) # Teacher forcing - feeding the target as the next input for t in range(1, targ.shape[1]): # passing enc_output to the decoder predictions, dec_hidden, _ = decoder(dec_input, dec_hidden, enc_output) loss += loss_function(targ[:, t], predictions) # using teacher forcing dec_input = tf.expand_dims(targ[:, t], 1) batch_loss = (loss / int(targ.shape[1])) variables = encoder.trainable_variables + decoder.trainable_variables gradients = tape.gradient(loss, variables) optimizer.apply_gradients(zip(gradients, variables)) return batch_loss EPOCHS = 10 for epoch in range(EPOCHS): start = time.time() enc_hidden = encoder.initialize_hidden_state() total_loss = 0 for (batch, (inp, targ)) in enumerate(dataset.take(steps_per_epoch)): batch_loss = train_step(inp, targ, enc_hidden) total_loss += batch_loss if batch % 100 == 0: print('Epoch {} Batch {} Loss {:.4f}'.format(epoch + 1, batch, batch_loss.numpy())) # saving (checkpoint) the model every 2 epochs if (epoch + 1) % 2 == 0: checkpoint.save(file_prefix = checkpoint_prefix) print('Epoch {} Loss {:.4f}'.format(epoch + 1, total_loss / steps_per_epoch)) print('Time taken for 1 epoch {} sec\n'.format(time.time() - start)) ``` ## **Translate** * The evaluate function is similar to the training loop, except we don't use *teacher forcing* here. The input to the decoder at each time step is its previous predictions along with the hidden state and the encoder output. * Stop predicting when the model predicts the *end token*. * And store the *attention weights for every time step*. Note: The encoder output is calculated only once for one input. 
``` def evaluate(sentence): attention_plot = np.zeros((max_length_targ, max_length_inp)) sentence = preprocess_sentence(sentence) inputs = [inp_lang.word_index[i] for i in sentence.split(' ')] inputs = tf.keras.preprocessing.sequence.pad_sequences([inputs], maxlen=max_length_inp, padding='post') inputs = tf.convert_to_tensor(inputs) result = '' hidden = [tf.zeros((1, units))] enc_out, enc_hidden = encoder(inputs, hidden) dec_hidden = enc_hidden dec_input = tf.expand_dims([targ_lang.word_index['<start>']], 0) for t in range(max_length_targ): predictions, dec_hidden, attention_weights = decoder(dec_input, dec_hidden, enc_out) # storing the attention weights to plot later on attention_weights = tf.reshape(attention_weights, (-1, )) attention_plot[t] = attention_weights.numpy() predicted_id = tf.argmax(predictions[0]).numpy() result += targ_lang.index_word[predicted_id] + ' ' if targ_lang.index_word[predicted_id] == '<end>': return result, sentence, attention_plot # the predicted ID is fed back into the model dec_input = tf.expand_dims([predicted_id], 0) return result, sentence, attention_plot # function for plotting the attention weights def plot_attention(attention, sentence, predicted_sentence): fig = plt.figure(figsize=(10,10)) ax = fig.add_subplot(1, 1, 1) ax.matshow(attention, cmap='viridis') fontdict = {'fontsize': 14} ax.set_xticklabels([''] + sentence, fontdict=fontdict, rotation=90) ax.set_yticklabels([''] + predicted_sentence, fontdict=fontdict) ax.xaxis.set_major_locator(ticker.MultipleLocator(1)) ax.yaxis.set_major_locator(ticker.MultipleLocator(1)) plt.show() def translate(sentence): result, sentence, attention_plot = evaluate(sentence) print('Input: %s' % (sentence)) print('Predicted translation: {}'.format(result)) attention_plot = attention_plot[:len(result.split(' ')), :len(sentence.split(' '))] plot_attention(attention_plot, sentence.split(' '), result.split(' ')) ``` ## **Restore the latest checkpoint and test** ``` # restoring the latest checkpoint in checkpoint_dir checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir)) translate(u'hace mucho frio aqui.') translate(u'esta es mi vida.') translate(u'¿todavia estan en casa?') # as near translation translate(u'trata de averiguarlo.') ```
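As a sanity check on the masked loss defined in the training section above, the short standalone sketch below (an added illustration, not part of the original training code) shows that padded target positions (id 0) contribute nothing to the per-step loss; it reuses the same `loss_object` and masking logic.

```
import tensorflow as tf

loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
    from_logits=True, reduction='none')

def loss_function(real, pred):
  # Identical to the notebook's loss: mask out positions where the target is the pad id 0
  mask = tf.math.logical_not(tf.math.equal(real, 0))
  loss_ = loss_object(real, pred)
  mask = tf.cast(mask, dtype=loss_.dtype)
  loss_ *= mask
  return tf.reduce_mean(loss_)

# One decoding step for a batch of 2 sequences over a toy vocabulary of 10 tokens;
# the second sequence is already padded (target id 0) at this step.
real = tf.constant([5, 0], dtype=tf.int64)
pred = tf.zeros((2, 10))  # uniform logits

print(loss_function(real, pred))  # the padded example adds zero to the summed loss
```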
github_jupyter
# Predict google map review dataset ## model - kcbert - fine-tuned with naver shopping review dataset (200,000개) - train 5 epochs - 0.97 accuracy ## dataset - google map review of tourist places in Daejeon, Korea ``` import torch from torch import nn, Tensor from torch.optim import Optimizer from torch.utils.data import DataLoader, RandomSampler, DistributedSampler, random_split from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler from torch.nn import CrossEntropyLoss from pytorch_lightning.core.lightning import LightningModule from pytorch_lightning import LightningModule, Trainer, seed_everything from pytorch_lightning.metrics.functional import accuracy, precision, recall from transformers import AdamW, BertForSequenceClassification, AdamW, BertConfig, AutoTokenizer, BertTokenizer, TrainingArguments from keras.preprocessing.sequence import pad_sequences import random import numpy as np import time import datetime import pandas as pd import os from tqdm import tqdm import pandas as pd from transformers import AutoTokenizer, AutoModelWithLMHead from keras.preprocessing.sequence import pad_sequences if torch.cuda.is_available(): device = torch.device("cuda") print('There are %d GPU(s) available.' % torch.cuda.device_count()) print('We will use the GPU:', torch.cuda.get_device_name(0)) else: print('No GPU available, using the CPU instead.') device = torch.device("cpu") pj_path = os.getenv('HOME') + '/Projects/JeongCheck' data_path = pj_path + '/compare' data_list = os.listdir(data_path) print(len(data_list)) data_list file_list = os.listdir(data_path) file_list spacing = pd.read_csv(data_path + f'/{file_list[0]}') spell = pd.read_csv(data_path + f'/{file_list[1]}') spacing.head() spell.head() len(spacing), len(spell) print(spacing.isna().sum()) print('\n') print(spell.isna().sum()) print(set(spacing.label)) print(set(spell.label)) print(len(spacing[spacing.label==2])) print(len(spell[spell.label==2])) test_spac = spacing.copy() test_spel = spell.copy() print(len(test_spac), len(test_spel)) ``` 중립 데이터 제외 ``` test_spac = test_spac[test_spac.label != 2] print(len(test_spac)) test_spel = test_spel[test_spel.label != 2] print(len(test_spel)) from transformers import BertForSequenceClassification, AdamW, BertConfig tokenizer = AutoTokenizer.from_pretrained("beomi/kcbert-base") # Load BertForSequenceClassification, the pretrained BERT model with a single # linear classification layer on top. model = BertForSequenceClassification.from_pretrained( pj_path + "/bert_model/checkpoint-2000", num_labels = 2, output_attentions = False, # Whether the model returns attentions weights. output_hidden_states = False, # Whether the model returns all hidden-states. 
) params = list(model.named_parameters()) print('The BERT model has {:} different named parameters.\n'.format(len(params))) print('==== Embedding Layer ====\n') for p in params[0:5]: print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size())))) print('\n==== First Transformer ====\n') for p in params[5:21]: print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size())))) print('\n==== Output Layer ====\n') for p in params[-4:]: print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size())))) def convert_input_data(sentences): tokenized_texts = [tokenizer.tokenize(sent) for sent in sentences] MAX_LEN = 64 # 토큰을 숫자 인덱스로 변환 input_ids = [tokenizer.convert_tokens_to_ids(x) for x in tokenized_texts] # 문장을 MAX_LEN 길이에 맞게 자르고, 모자란 부분을 패딩 0으로 채움 input_ids = pad_sequences(input_ids, maxlen=MAX_LEN, dtype="long", truncating="post", padding="post") # 어텐션 마스크 초기화 attention_masks = [] # 어텐션 마스크를 패딩이 아니면 1, 패딩이면 0으로 설정 for seq in input_ids: seq_mask = [float(i>0) for i in seq] attention_masks.append(seq_mask) inputs = torch.tensor(input_ids) masks = torch.tensor(attention_masks) return inputs, masks def test_sentences(sentences): # 평가모드로 변경!!!!! model.eval() inputs, masks = convert_input_data(sentences) # 데이터를 GPU에 넣음 b_input_ids = inputs.to(device) b_input_mask = masks.to(device) # 그래디언트 계산 안함 with torch.no_grad(): # Forward 수행 outputs = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask) # 로스 구함 logits = outputs[0] # CPU로 데이터 이동 logits = logits.detach().cpu().numpy() return logits device = "cuda:0" model = model.to(device) ``` ## 데이터 변환 ``` def preprocessing(df): df.document=df.comment.replace('[^A-Za-zㄱ-ㅎㅏ-ㅣ가-힣]+','') return df # result = preprocessing(gr_data) # result = result.dropna() # print(result) # 감성분석할 comment 추출 def export_com(preprocessed_df): sens =[] for sen in preprocessed_df.comment: sens.append(sen) print('check lenght :', len(sens), len(preprocessed_df)) # 개수 확인 print('sample sentence :', sens[1]) return sens def make_predicted_label(sen): sen = [sen] score = test_sentences(sen) result = np.argmax(score) if result == 0: # negative return 0 elif result == 1: # positive return 1 def predict_label(model, df, place_name): result = preprocessing(df) result = result.dropna() sens = export_com(result) scores_data=[] for sen in sens: scores_data.append(make_predicted_label(sen)) df['pred'] = scores_data cor = df[df.label == df.pred] uncor = df[df.label != df.pred] print('correct prediction num :', len(cor)) print('uncorrect prediction num :', len(uncor)) print('correct label check :' ,set(cor.label)) # df.to_csv(pj_path + f'/sentiment_data/{place_name}_pred_kcbert.csv') return df print('### spacing ###') predict_spac = predict_label(model, test_spac, 'total') print('### spell ###') predict_spel = predict_label(model, test_spel, 'total') ``` ## Loss (RMSE) ``` from sklearn.preprocessing import MinMaxScaler from sklearn.metrics import mean_squared_error import math def rmse(y, y_pred): from sklearn.metrics import mean_squared_error import math print('lenght check (origin, prediction):', len(y), len(y_pred)) rmse_label = math.sqrt(mean_squared_error(y, y_pred)) print('rmse of label :', rmse_label) ``` ## Accuracy ``` def acc(y, y_pred, total): correct = (y_pred == y).sum().item() print(f'Accuracy of the network on the {total} test text: %d %%' % ( 100 * correct / total)) ``` ## f1-score ``` from sklearn.metrics import f1_score, classification_report def f1(y, y_pred): score = f1_score(y, y_pred) report = classification_report(y, y_pred) print('f1 score :', score) print('===== 
classification report =====') print(report) ``` ## calculate performance - RMSE - Accuracy - f1-score ``` def cal_perform(df): y = df.label y_pred = df.pred if len(y) == len(y_pred): total = len(y) print('label length :', total) else: print('It has different length !') rmse(y, y_pred) acc(y, y_pred, total) f1(y, y_pred) print('===== spacing =====') cal_perform(predict_spac) print('===== spell =====') cal_perform(predict_spel) ```
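To complement the RMSE / accuracy / F1 report above, a confusion matrix shows where the remaining errors sit. The helper below is an added illustration (not part of the original notebook) and assumes the dataframes returned by `predict_label`, which carry both a `label` and a `pred` column.

```
from sklearn.metrics import confusion_matrix
import pandas as pd

def show_confusion(df):
    # Rows are true labels, columns are predictions (0 = negative, 1 = positive)
    cm = confusion_matrix(df.label, df.pred, labels=[0, 1])
    return pd.DataFrame(cm,
                        index=['true_0 (neg)', 'true_1 (pos)'],
                        columns=['pred_0 (neg)', 'pred_1 (pos)'])

print(show_confusion(predict_spac))
print(show_confusion(predict_spel))
```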
github_jupyter
### Testing for Interactive use case ``` import mlflow from azureml.core import Workspace, Experiment, Environment, Datastore, Dataset, ScriptRunConfig from azureml.core.runconfig import PyTorchConfiguration # from azureml.widgets import RunDetails from azureml.core.compute import ComputeTarget, AmlCompute from azureml.core.compute_target import ComputeTargetException from azureml.core.runconfig import PyTorchConfiguration from azureml.core.environment import Environment from azureml.core.conda_dependencies import CondaDependencies from IPython.display import clear_output import time import platform # from ray_on_azureml.ray_on_aml import getRay import sys sys.path.append("../") # go to parent dir import importlib from src.ray_on_azureml.ray_on_aml import Ray_On_AML ws = Workspace.from_config() ray_on_aml =Ray_On_AML(ws=ws, compute_cluster ="worker-cpu-v3") _, ray = ray_on_aml.getRay() ray.cluster_resources() ray_on_aml.shutdown() # import ray # ray.shutdown() # ray.init() ``` ### Testing with Dask on Ray ``` # import ray # ray.init() from ray.util.dask import ray_dask_get import dask import dask.array as da import dask.dataframe as dd import numpy as np import pandas as pd dask.config.set(scheduler=ray_dask_get) d_arr = da.from_array(np.random.randint(0, 1000, size=(256, 256))) # The Dask scheduler submits the underlying task graph to Ray. d_arr.mean().compute(scheduler=ray_dask_get) # Set the scheduler to ray_dask_get in your config so you don't have to # specify it on each compute call. df = dd.from_pandas( pd.DataFrame( np.random.randint(0, 10000, size=(1024, 2)), columns=["age", "grade"]), npartitions=2) df.groupby(["age"]).mean().compute() # ray.shutdown() import dask.dataframe as dd storage_options = {'account_name': 'azureopendatastorage'} ddf = dd.read_parquet('az://nyctlc/green/puYear=2019/puMonth=*/*.parquet', storage_options=storage_options) ddf.count().compute() #dask # import ray from ray.util.dask import ray_dask_get import dask import dask.array as da import dask.dataframe as dd import numpy as np import pandas as pd import dask import dask.dataframe as dd import matplotlib.pyplot as plt from datetime import datetime from azureml.core import Workspace, Dataset, Model from adlfs import AzureBlobFileSystem account_key = ws.get_default_keyvault().get_secret("adls7-account-key") account_name="adlsgen7" abfs = AzureBlobFileSystem(account_name="adlsgen7",account_key=account_key, container_name="mltraining") abfs2 = AzureBlobFileSystem(account_name="azureopendatastorage", container_name="isdweatherdatacontainer") storage_options={'account_name': account_name, 'account_key': account_key} # ddf = dd.read_parquet('az://mltraining/ISDWeatherDelta/year2008', storage_options=storage_options) data = ray.data.read_parquet("az://isdweatherdatacontainer/ISDWeather/year=2009", filesystem=abfs2) data2 = ray.data.read_parquet("az://mltraining/ISDWeatherDelta/year2008", filesystem=abfs) data.count() ``` ### Testing Ray Tune for distributed ML tunning ``` import numpy as np import torch import torch.optim as optim import torch.nn as nn from torchvision import datasets, transforms from torch.utils.data import DataLoader import torch.nn.functional as F # import ray from ray import tune from ray.tune.schedulers import ASHAScheduler class ConvNet(nn.Module): def __init__(self): super(ConvNet, self).__init__() # In this example, we don't change the model architecture # due to simplicity. 
self.conv1 = nn.Conv2d(1, 3, kernel_size=3) self.fc = nn.Linear(192, 10) def forward(self, x): x = F.relu(F.max_pool2d(self.conv1(x), 3)) x = x.view(-1, 192) x = self.fc(x) return F.log_softmax(x, dim=1) # Change these values if you want the training to run quicker or slower. EPOCH_SIZE = 512 TEST_SIZE = 256 def train(model, optimizer, train_loader): device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model.train() for batch_idx, (data, target) in enumerate(train_loader): # We set this just for the example to run quickly. if batch_idx * len(data) > EPOCH_SIZE: return data, target = data.to(device), target.to(device) optimizer.zero_grad() output = model(data) loss = F.nll_loss(output, target) loss.backward() optimizer.step() def test(model, data_loader): device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model.eval() correct = 0 total = 0 with torch.no_grad(): for batch_idx, (data, target) in enumerate(data_loader): # We set this just for the example to run quickly. if batch_idx * len(data) > TEST_SIZE: break data, target = data.to(device), target.to(device) outputs = model(data) _, predicted = torch.max(outputs.data, 1) total += target.size(0) correct += (predicted == target).sum().item() return correct / total def train_mnist(config): # Data Setup mnist_transforms = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.1307, ), (0.3081, ))]) train_loader = DataLoader( datasets.MNIST("~/data", train=True, download=True, transform=mnist_transforms), batch_size=64, shuffle=True) test_loader = DataLoader( datasets.MNIST("~/data", train=False, transform=mnist_transforms), batch_size=64, shuffle=True) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model = ConvNet() model.to(device) optimizer = optim.SGD( model.parameters(), lr=config["lr"], momentum=config["momentum"]) for i in range(10): train(model, optimizer, train_loader) acc = test(model, test_loader) # Send the current training result back to Tune tune.report(mean_accuracy=acc) if i % 5 == 0: # This saves the model to the trial directory torch.save(model.state_dict(), "./model.pth") search_space = { "lr": tune.sample_from(lambda spec: 10**(-10 * np.random.rand())), "momentum": tune.uniform(0.01, 0.09) } # Uncomment this to enable distributed execution # ray.shutdown() # ray.init(address="auto",ignore_reinit_error=True) # ray.init(address =f'ray://{headnode_private_ip}:10001',allow_multiple=True,ignore_reinit_error=True ) # Download the dataset first datasets.MNIST("~/data", train=True, download=True) analysis = tune.run(train_mnist, config=search_space) import sklearn.datasets import sklearn.metrics from sklearn.model_selection import train_test_split import xgboost as xgb from ray import tune def train_breast_cancer(config): # Load dataset data, labels = sklearn.datasets.load_breast_cancer(return_X_y=True) # Split into train and test set train_x, test_x, train_y, test_y = train_test_split( data, labels, test_size=0.25) # Build input matrices for XGBoost train_set = xgb.DMatrix(train_x, label=train_y) test_set = xgb.DMatrix(test_x, label=test_y) # Train the classifier results = {} xgb.train( config, train_set, evals=[(test_set, "eval")], evals_result=results, verbose_eval=False) # Return prediction accuracy accuracy = 1. 
- results["eval"]["error"][-1] tune.report(mean_accuracy=accuracy, done=True) config = { "objective": "binary:logistic", "eval_metric": ["logloss", "error"], "max_depth": tune.randint(1, 9), "min_child_weight": tune.choice([1, 2, 3]), "subsample": tune.uniform(0.5, 1.0), "eta": tune.loguniform(1e-4, 1e-1) } analysis = tune.run( train_breast_cancer, resources_per_trial={"cpu": 1}, config=config, num_samples=10) ``` ### Testing Spark on Ray ``` import ray import raydp import os ray.shutdown() ray.init() os.environ["PYSPARK_PYTHON"]="/anaconda/envs/azureml_py38/bin/python3" # ray.init(address ='ray://10.0.0.11:6379') spark = raydp.init_spark( app_name = "example", num_executors = 2, executor_cores = 1, executor_memory = "1gb" ) # data =spark.read.format("csv").option("header", True).load("wasbs://ojsales-simulatedcontainer@azureopendatastorage.blob.core.windows.net/oj_sales_data/Store10*.csv") # # normal data processesing with Spark # df = spark.createDataFrame([('look',), ('spark',), ('tutorial',), ('spark',), ('look', ), ('python', )], ['word']) # df.show() # word_count = df.groupBy('word').count() # word_count.show() import pandas as pd from pyspark.sql.functions import col, pandas_udf from pyspark.sql.types import LongType # Declare the function and create the UDF def multiply_func(a: pd.Series, b: pd.Series) -> pd.Series: return a * b multiply = pandas_udf(multiply_func, returnType=LongType()) # The function for a pandas_udf should be able to execute with local Pandas data x = pd.Series([1, 2, 3]) print(multiply_func(x, x)) # 0 1 # 1 4 # 2 9 # dtype: int64 # Create a Spark DataFrame, 'spark' is an existing SparkSession df = spark.createDataFrame(pd.DataFrame(x, columns=["x"])) # Execute function as a Spark vectorized UDF df.select(multiply(col("x"), col("x"))).show() # +-------------------+ # |multiply_func(x, x)| # +-------------------+ # | 1| # | 4| # | 9| # +-------------------+ # stop the spark cluster raydp.stop_spark() raydp.stop_spark() ``` ## Testing Ray on Job Cluster ``` # pyarrow >=6.0.1 # dask >=2021.11.2 # adlfs >=2021.10.0 # fsspec==2021.10.1 # ray[default]==1.9.0 ws = Workspace.from_config() # base_conda_dep =['adlfs>=2021.10.0','pytorch','matplotlib','torchvision','pip'] # base_pip_dep = ['sklearn','xgboost','lightgbm','ray[default]==1.9.0', 'xgboost_ray', 'dask','pyarrow>=6.0.1', 'azureml-mlflow'] compute_cluster = 'worker-cpu-v3' maxnode =5 vm_size='STANDARD_DS3_V2' vnet='rayvnet' subnet='default' exp ='ray_on_aml_job' ws_detail = ws.get_details() ws_rg = ws_detail['id'].split("/")[4] vnet_rg=None try: ray_cluster = ComputeTarget(workspace=ws, name=compute_cluster) print('Found existing cluster, use it.') except ComputeTargetException: if vnet_rg is None: vnet_rg = ws_rg compute_config = AmlCompute.provisioning_configuration(vm_size=vm_size, min_nodes=0, max_nodes=maxnode, vnet_resourcegroup_name=vnet_rg, vnet_name=vnet, subnet_name=subnet) ray_cluster = ComputeTarget.create(ws, compute_cluster, compute_config) ray_cluster.wait_for_completion(show_output=True) # python_version = ["python="+platform.python_version()] # conda_packages = python_version+base_conda_dep # pip_packages = base_pip_dep # conda_dep = CondaDependencies() # rayEnv = Environment(name="rayEnv") rayEnv = Environment.get(ws, "rayEnv", version=16) # for conda_package in conda_packages: # conda_dep.add_conda_package(conda_package) # for pip_package in pip_packages: # conda_dep.add_pip_package(pip_package) # # Adds dependencies to PythonSection of myenv # rayEnv.python.conda_dependencies=conda_dep src = 
ScriptRunConfig(source_directory='job', script='aml_job.py', environment=rayEnv, compute_target=ray_cluster, distributed_job_config=PyTorchConfiguration(node_count=maxnode), # arguments = ["--master_ip",master_ip] ) run = Experiment(ws, exp).submit(src) ```
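Once the experiment is submitted, the returned `run` object can be used to follow the remote Ray job from the notebook. The calls below are standard `azureml.core` run methods, shown here as an optional follow-up rather than as part of the original workflow.

```
# Stream the driver logs until the remote job finishes
run.wait_for_completion(show_output=True)

# Basic inspection of the finished run
print(run.get_status())      # e.g. 'Completed' or 'Failed'
print(run.get_metrics())     # any metrics logged from aml_job.py
print(run.get_portal_url())  # link to the run in the Azure ML studio UI
```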
github_jupyter
<a href="https://colab.research.google.com/github/Tessellate-Imaging/monk_v1/blob/master/study_roadmaps/2_transfer_learning_roadmap/5_exploring_model_families/2_vgg/1.1)%20Intro%20to%20vgg%20network%20-%20mxnet%20backend.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Goals ### Train a architectural heritage site classifier using vgg16 ### Understand what lies inside vgg network # What is vgg ## Readings on vgg 1) Points from https://towardsdatascience.com/vgg-neural-networks-the-next-step-after-alexnet-3f91fa9ffe2c - VGG addresses another very important aspect of CNNs: depth - All of VGG’s hidden layers use ReLU - Unlike 11x11 kernels of alexnet, it uses smaller ones 1x1 and 3x3 kernels 2) Points from https://becominghuman.ai/what-is-the-vgg-neural-network-a590caa72643 - Intuitively, more layer is better. However, the authors found that VGG-16 is better than VGG-19 - Authors introduce multi-scale evaluationin the paper 3) Read more here - - https://arxiv.org/abs/1409.1556 - https://machinelearningmastery.com/use-pre-trained-vgg-model-classify-objects-photographs/ - https://www.cs.toronto.edu/~frossard/post/vgg16/ - https://d2l.ai/chapter_convolutional-modern/vgg.html # Table of Contents ## [0. Install](#0) ## [1. Load experiment with vgg base architecture](#1) ## [2. Visualize vgg](#2) ## [3. Train the classifier](#3) ## [4. Run inference on trained classifier](#5) <a id='0'></a> # Install Monk - git clone https://github.com/Tessellate-Imaging/monk_v1.git - cd monk_v1/installation/Linux && pip install -r requirements_cu9.txt - (Select the requirements file as per OS and CUDA version) ``` !git clone https://github.com/Tessellate-Imaging/monk_v1.git # If using Colab install using the commands below !cd monk_v1/installation/Misc && pip install -r requirements_colab.txt # If using Kaggle uncomment the following command #!cd monk_v1/installation/Misc && pip install -r requirements_kaggle.txt # Select the requirements file as per OS and CUDA version when using a local system or cloud #!cd monk_v1/installation/Linux && pip install -r requirements_cu9.txt ``` ## Dataset - Architectural Heritage site Classification - https://old.datahub.io/dataset/architectural-heritage-elements-image-dataset ``` ! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1MFu7cnxwDM7LWKgeLggMLvWIBW_-YCWC' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1MFu7cnxwDM7LWKgeLggMLvWIBW_-YCWC" -O architectural_heritage.zip && rm -rf /tmp/cookies.txt ! 
unzip -qq architectural_heritage.zip ``` # Imports ``` # Monk import os import sys sys.path.append("monk_v1/monk/"); #Using mxnet-gluon backend from gluon_prototype import prototype ``` <a id='1'></a> # Load experiment with vgg base architecture ## Creating and managing experiments - Provide project name - Provide experiment name - For a specific data create a single project - Inside each project multiple experiments can be created - Every experiment can be have diferent hyper-parameters attached to it ``` gtf = prototype(verbose=1); gtf.Prototype("Project", "vgg-intro"); ``` ### This creates files and directories as per the following structure workspace | |--------Project | | |-----vgg-intro | |-----experiment-state.json | |-----output | |------logs (All training logs and graphs saved here) | |------models (all trained models saved here) ## Set dataset and select the model ## Quick mode training - Using Default Function - dataset_path - model_name - freeze_base_network - num_epochs ## Sample Dataset folder structure architectural_heritage | |-----train |------dome | |------img1.jpg |------img2.jpg |------.... (and so on) |------altal | |------img1.jpg |------img2.jpg |------.... (and so on) |------.... (and so on) | | |-----val |------dome | |------img1.jpg |------img2.jpg |------.... (and so on) |------altal | |------img1.jpg |------img2.jpg |------.... (and so on) |------.... (and so on) ``` gtf.Default(dataset_path="architectural_heritage/train", model_name="vgg16", freeze_base_network=False, num_epochs=5); ``` ## From the summary above - Model Params Model name: vgg16 Num of potentially trainable layers: 16 Num of actual trainable layers: 16 <a id='2'></a> # Visualize vgg ``` gtf.Visualize_With_Netron(data_shape=(3, 224, 224), port=8082); ``` ## vgg block - 1 - Creating network and blocks using monk from scratch will be dealt in different roadmap series ``` from IPython.display import Image Image(filename='imgs/vgg_block1_mxnet.png') ``` ## Properties - This block has 3 layers - conv -> relu ## vgg block - 2 - Creating network and blocks using monk from scratch will be dealt in different roadmap series ``` from IPython.display import Image Image(filename='imgs/vgg_block2_mxnet.png') ``` ## Properties - This block has 3 layers - conv -> relu -> max_pool ## vgg fully connected chain ``` from IPython.display import Image Image(filename='imgs/vgg_block_fc_mxnet.png') ``` ## vgg Network - Creating network and blocks using monk from scratch will be dealt in different roadmap series ``` from IPython.display import Image Image(filename='imgs/vgg16_mxnet.png') ``` ## Properties - This network - has 9 type-1 blocks - has 5 type-2 blocks - post these blocks the type-3 (fc) block exists <a id='3'></a> # Train the classifier ``` #Start Training gtf.Train(); #Read the training summary generated once you run the cell and training is completed ``` <a id='4'></a> # Run inference on trained classifier ``` gtf = prototype(verbose=1); gtf.Prototype("Project", "vgg-intro", eval_infer=True); output = gtf.Infer(img_name = "architectural_heritage/test/test1.jpg"); from IPython.display import Image Image(filename='architectural_heritage/test/test1.jpg') output = gtf.Infer(img_name = "architectural_heritage/test/test2.jpg"); from IPython.display import Image Image(filename='architectural_heritage/test/test2.jpg') output = gtf.Infer(img_name = "architectural_heritage/test/test3.jpg"); from IPython.display import Image Image(filename='architectural_heritage/test/test3.jpg') ```
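To make the block structure above concrete, here is a plain Gluon sketch of the same pattern (repeated conv→relu units, a max-pool closing each stage, then the fully-connected chain). It is only an illustration written independently of Monk's internal graph, assuming `mxnet` is available.

```
from mxnet.gluon import nn

def vgg_stage(channels, n_convs):
    # One VGG stage: n_convs 3x3 conv + ReLU layers, closed by 2x2 max pooling
    stage = nn.HybridSequential()
    for _ in range(n_convs):
        stage.add(nn.Conv2D(channels, kernel_size=3, padding=1, activation='relu'))
    stage.add(nn.MaxPool2D(pool_size=2, strides=2))
    return stage

def vgg16_like(num_classes):
    net = nn.HybridSequential()
    # Five stages -> 2 + 2 + 3 + 3 + 3 = 13 convolutional layers
    for channels, n_convs in [(64, 2), (128, 2), (256, 3), (512, 3), (512, 3)]:
        net.add(vgg_stage(channels, n_convs))
    # Fully-connected chain: two hidden Dense layers plus the classifier (3 more layers)
    net.add(nn.Flatten(),
            nn.Dense(4096, activation='relu'), nn.Dropout(0.5),
            nn.Dense(4096, activation='relu'), nn.Dropout(0.5),
            nn.Dense(num_classes))
    return net

net = vgg16_like(num_classes=10)
net.initialize()
print(net)
```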
github_jupyter
# Use PMML to predict iris species with `ibm-watson-machine-learning` This notebook contains steps from storing sample PMML model to starting scoring new data. Some familiarity with python is helpful. This notebook uses Python 3. You will use a **Iris** data set, which details measurements of iris perianth. Use the details of this data set to predict iris species. ## Learning goals The learning goals of this notebook are: - Working with the WML instance - Online deployment of PMML model - Scoring of deployed model ## Contents This notebook contains the following parts: 1. [Setup](#setup) 2. [Model upload](#upload) 3. [Web service creation](#deploy) 4. [Scoring](#score) 5. [Clean up](#cleanup) 6. [Summary and next steps](#summary) <a id="setup"></a> ## 1. Set up the environment Before you use the sample code in this notebook, you must perform the following setup tasks: - Contact with your Cloud Pack for Data administrator and ask him for your account credentials ### Connection to WML Authenticate the Watson Machine Learning service on IBM Cloud Pack for Data. You need to provide platform `url`, your `username` and `password`. ``` wml_credentials = { "username": username, "password": password, "url": url, "instance_id": 'openshift', "version": '3.5' } ``` ### Install and import the `ibm-watson-machine-learning` package **Note:** `ibm-watson-machine-learning` documentation can be found <a href="http://ibm-wml-api-pyclient.mybluemix.net/" target="_blank" rel="noopener no referrer">here</a>. ``` !pip install -U ibm-watson-machine-learning from ibm_watson_machine_learning import APIClient client = APIClient(wml_credentials) ``` ### Working with spaces First of all, you need to create a space that will be used for your work. If you do not have space already created, you can use `{PLATFORM_URL}/ml-runtime/spaces?context=icp4data` to create one. - Click New Deployment Space - Create an empty space - Go to space `Settings` tab - Copy `space_id` and paste it below **Tip**: You can also use SDK to prepare the space for your work. More information can be found [here](https://github.com/IBM/watson-machine-learning-samples/blob/master/cpd3.5/notebooks/python_sdk/instance-management/Space%20management.ipynb). **Action**: Assign space ID below ``` space_id = 'PASTE YOUR SPACE ID HERE' ``` You can use `list` method to print all existing spaces. ``` client.spaces.list(limit=10) ``` To be able to interact with all resources available in Watson Machine Learning, you need to set **space** which you will be using. ``` client.set.default_space(space_id) ``` <a id="upload"></a> ## 2. Upload model In this section you will learn how to upload the model to the Cloud. **Action**: Download sample PMML model from git project using wget. ``` import os from wget import download sample_dir = 'pmml_sample_model' if not os.path.isdir(sample_dir): os.mkdir(sample_dir) filename=os.path.join(sample_dir, 'iris_chaid.xml') if not os.path.isfile(filename): filename = download('https://raw.githubusercontent.com/IBM/watson-machine-learning-samples/master/cpd3.5/models/pmml/iris-species/model/iris_chaid.xml', out=sample_dir) ``` Store downloaded file in Watson Machine Learning repository. 
``` sw_spec_uid = client.software_specifications.get_uid_by_name("spark-mllib_2.4") meta_props = { client.repository.ModelMetaNames.NAME: "pmmlmodel", client.repository.ModelMetaNames.SOFTWARE_SPEC_UID: sw_spec_uid, client.repository.ModelMetaNames.TYPE: 'pmml_4.2.1'} published_model = client.repository.store_model(model=filename, meta_props=meta_props) ``` **Note:** You can see that model is successfully stored in Watson Machine Learning Service. ``` client.repository.list_models() ``` <a id="deployment"></a> ## 3. Create online deployment You can use commands bellow to create online deployment for stored model (web service). ``` model_uid = client.repository.get_model_uid(published_model) deployment = client.deployments.create( artifact_uid=model_uid, meta_props={ client.deployments.ConfigurationMetaNames.NAME: "Test deployment", client.deployments.ConfigurationMetaNames.ONLINE:{}} ) ``` <a id="scoring"></a> ## 4. Scoring You can send new scoring records to web-service deployment using `score` method. ``` deployment_id = client.deployments.get_id(deployment) scoring_data = { client.deployments.ScoringMetaNames.INPUT_DATA: [ { 'fields': ['Sepal.Length', 'Sepal.Width', 'Petal.Length', 'Petal.Width'], 'values': [[5.1, 3.5, 1.4, 0.2]] }] } predictions = client.deployments.score(deployment_id, scoring_data) print(predictions) ``` As we can see this is Iris Setosa flower. <a id="cleanup"></a> ## 5. Clean up If you want to clean up all created assets: - experiments - trainings - pipelines - model definitions - models - functions - deployments please follow up this sample [notebook](https://github.com/IBM/watson-machine-learning-samples/blob/master/cpd3.5/notebooks/python_sdk/instance-management/Machine%20Learning%20artifacts%20management.ipynb). <a id="summary"></a> ## 6. Summary and next steps You successfully completed this notebook! You learned how to use Watson Machine Learning for PMML model deployment and scoring. Check out our [Online Documentation](https://dataplatform.cloud.ibm.com/docs/content/analyze-data/wml-setup.html) for more samples, tutorials, documentation, how-tos, and blog posts. ### Authors **Lukasz Cmielowski**, PhD, is a Software Architect and Data Scientist at IBM. Copyright © 2020, 2021 IBM. This notebook and its source code are released under the terms of the MIT License.
github_jupyter
# Metadata preprocessing tutorial Melusine **prepare_data.metadata_engineering subpackage** provides classes to preprocess the metadata : - **MetaExtension :** a transformer which creates an 'extension' feature extracted from regex in metadata. It extracts the extensions of mail adresses. - **MetaDate :** a transformer which creates new features from dates such as: hour, minute, dayofweek. - **MetaAttachmentType :** a transformer which creates an 'attachment type' feature extracted from regex in metadata. It extracts the extensions of attached files. - **Dummifier :** a transformer to dummifies categorial features. All the classes have **fit_transform** methods. ### Input dataframe - To use a **MetaExtension** transformer : the dataframe requires a **from** column - To use a **MetaDate** transformer : the dataframe requires a **date** column - To use a **MetaAttachmentType** transformer : the dataframe requires a **attachment** column with the list of attached files ``` from melusine.data.data_loader import load_email_data import ast df_emails = load_email_data() df_emails = df_emails[['from','date', 'attachment']] df_emails['from'] df_emails['date'] df_emails['attachment'] = df_emails['attachment'].apply(ast.literal_eval) df_emails['attachment'] ``` ### MetaExtension transformer A **MetaExtension transformer** creates an *extension* feature extracted from regex in metadata. It extracts the extensions of mail adresses. ``` from melusine.prepare_email.metadata_engineering import MetaExtension meta_extension = MetaExtension() df_emails = meta_extension.fit_transform(df_emails) df_emails.extension ``` ### MetaDate transformer A **MetaDate transformer** creates new features from dates : hour, minute and dayofweek ``` from melusine.prepare_email.metadata_engineering import MetaDate meta_date = MetaDate() df_emails = meta_date.fit_transform(df_emails) df_emails.date[0] df_emails.hour[0] df_emails.loc[0,'min'] df_emails.dayofweek[0] ``` ### MetaAttachmentType transformer A **MetaAttachmentType transformer** creates an *attachment_type* feature extracted from an attachment names list. It extracts the extensions of attachments files. ``` from melusine.prepare_email.metadata_engineering import MetaAttachmentType meta_pj = MetaAttachmentType() df_emails = meta_pj.fit_transform(df_emails) df_emails.attachment_type ``` ### Dummifier transformer A **Dummifier transformer** dummifies categorial features. Its arguments are : - **columns_to_dummify** : a list of the metadata columns to dummify. ``` from melusine.prepare_email.metadata_engineering import Dummifier dummifier = Dummifier(columns_to_dummify=['extension','attachment_type', 'dayofweek', 'hour', 'min']) df_meta = dummifier.fit_transform(df_emails) df_meta.columns df_meta.head() df_meta.to_csv('./data/metadata.csv', index=False, encoding='utf-8', sep=';') ``` ### Custom metadata transformer A custom transformer can be implemented to extract metadata from a column : ```python from sklearn.base import BaseEstimator, TransformerMixin class MetaDataCustom(BaseEstimator, TransformerMixin): """Transformer which creates custom matadata Compatible with scikit-learn API. """ def __init__(self): """ arguments """ def fit(self, X, y=None): """ Fit method""" return self def transform(self, X): """Transform method""" X['custom_metadata'] = X['column'].apply(self.get_metadata) return X ``` The name of the output column can then be given as argument to a Dummifier transformer : ```python dummifier = Dummifier(columns_to_dummify=['custom_metadata']) ```
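For example, following the skeleton above, a hypothetical `MetaAttachmentCount` transformer (the class name and output column are illustrative, not part of Melusine) could count the number of files attached to each email:

```python
from sklearn.base import BaseEstimator, TransformerMixin

class MetaAttachmentCount(BaseEstimator, TransformerMixin):
    """Transformer which creates a 'nb_attachments' metadata column.

    Compatible with scikit-learn API.
    """

    def fit(self, X, y=None):
        """Fit method (stateless, nothing to learn)"""
        return self

    def transform(self, X):
        """Transform method: count the attached files listed in the 'attachment' column"""
        X['nb_attachments'] = X['attachment'].apply(len)
        return X

meta_count = MetaAttachmentCount()
df_emails = meta_count.fit_transform(df_emails)
df_emails['nb_attachments']
```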
github_jupyter
<p><img alt="DataOwl" width=150 src="http://gwsolutions.cl/Images/dataowl.png", align="left", hspace=0, vspace=5></p>

<h1 align="center">Applications of the Derivative</h1>
<h4 align="center">Single-variable equations and optimization</h4>

<pre><div align="center"> The idea of this notebook is to serve as an introduction to the mathematical concepts needed to apply the numerical derivative to solving single-variable equations and to optimization.</div>

# Applications of the derivative

In previous classes, we addressed the problem of finding where a function vanishes. In this notebook we will see that derivatives can also help with that challenge, in addition to being applicable to other problems, such as approximating a function by polynomials and optimizing a function.

## 4. Single-variable equations (continued)

### 4.1 Secant Method

<img src="https://upload.wikimedia.org/wikipedia/commons/thumb/9/92/Secant_method.svg/450px-Secant_method.svg.png" alt="Secant method" width=280 align="center" hspace=0 vspace=5 padding:5px />

From the previous class, we know that a function $f$ can be cut at two of its points by a line called a *secant*. This line has a well-defined equation, this time given by the points $(x_0,f(x_0))$, $(x_1,f(x_1))$ and the formula

$$y\ =\ \frac{f(x_1)-f(x_0)}{x_1-x_0}(x-x_1)+f(x_1)$$

To find a value $x$ at which $f(x)=0$, the desired result can be approximated by setting $y=0$ in the formula above. This yields a partial solution

$$x = x_1-f(x_1)\frac{x_1-x_0}{f(x_1)-f(x_0)}$$

This can be extended iteratively, generating a sequence of values $x_n$ that approach the true solution:

$$x_n = x_{n-1}-f(x_{n-1})\frac{x_{n-1}-x_{n-2}}{f(x_{n-1})-f(x_{n-2})}$$

This depends on the choice of two starting points, $x_0$ and $x_1$, as well as on some properties that $f$ must satisfy, which we will mention shortly. This method is known as the **Secant Method**.

### 4.2 Newton-Raphson Method

<img src="https://upload.wikimedia.org/wikipedia/commons/thumb/e/e0/NewtonIteration_Ani.gif/450px-NewtonIteration_Ani.gif" width=450 alt="Newton-Raphson method" align="center"/>

Likewise, if the difference between $x_{n-1}$ and $x_{n-2}$ is "small", the previous sequence can be approximated by the formula

$$x_n = x_{n-1}-\frac{f(x_{n-1})}{f'(x_{n-1})}$$

where the recurrence now depends on only one previous step, so a single starting point $x_0$ is required. Moreover, this last method converges faster than the Secant Method, and it is known as the **Newton-Raphson Method**.

### 4.3 Required hypotheses

Note that both methods have their limitations, the most important being that there is **only one zero** in the interval (for the Secant Method), that $f'(x)\neq0$ (for the Newton-Raphson Method), and that the function is twice continuously differentiable. This last point forces us to define what "twice continuously differentiable" means. The most direct way to handle it is simply to say that the method we used to compute the derivative of $f$ is now applied to compute the derivative of $f'$, and that the result is continuous in the sense seen in previous classes. What we obtain is called the **second derivative** of $f$, denoted $\frac{d^2f}{dx^2}(x)$ or $f''(x)$.

## 5. Optimization
The goal of this branch of mathematics is to find where functions reach their maximum or minimum value, which conditions must hold for these to exist, and how such values can be approximated. In this section we will look at basic ideas of optimization for real differentiable functions with continuous derivative, in unconstrained problems.

Recall that the derivative of a function $f$ at a point $x$ gives the slope of the tangent line to its curve at that point. Therefore, when $f'(x)>0$ the function is said to be increasing around that point, and decreasing when $f'(x)<0$. As we saw in the exercise on finding zeros of a continuous function, a sign change in the function $f'$ implies that there must exist a value $\bar{x}$ at which $f'(\bar{x})=0$. A point $\bar{x}$ with this property is called a **stationary point**, since there the function neither increases nor decreases. This means that, if we find such a value $\bar{x}$, it will be a *candidate* for a maximum or a minimum (*saddle points* exist!).

We already know ways to find the zeros of a function. We can apply them to our function $f'$ to find, approximately, where it vanishes and thus obtain the candidates for an optimum. To better understand the nature of the candidate $\bar{x}$, we need to compute $f''(\bar{x})$. If $f''(\bar{x})>0$, then $\bar{x}$ is certainly a **minimum**, while if $f''(\bar{x})<0$, $\bar{x}$ is a **maximum**. The case $f''(\bar{x})=0$ is more problematic, although it can be handled mathematically, and even more easily visually.

```
# Importing the libraries
%matplotlib notebook
import numpy as np
import matplotlib.colors as mcolors # Gives us access to a wider color palette
import matplotlib.pyplot as plt
import experimento5 as ex

def f(x): # An example function
    return 0.5 - np.sin(2 * x)

def g(x): # An example function
    return (np.exp(-x ** 2) - x) / ((x + 1) ** 2 + (x - 1) ** 2)

def h(x):
    return np.exp(x)

# We test our derivative functions with different array sizes and dx
x = np.linspace(-np.pi, np.pi, 1000)
y = g(x)
dydx1 = ex.derivadavec(x, y)
dydx2 = ex.derivadafun(0, np.pi/2, g, 0.001)
x2 = np.linspace(0, np.pi/2, len(dydx2))

plt.plot(x, dydx1, color='gold', label='Vector derivative', zorder=0)
plt.plot(x2, dydx2, color='crimson', label='Function derivative', zorder=1)
plt.plot(x, y, color='gray', label='Function', zorder=2)
plt.xlabel('x')
plt.ylabel('y')
plt.title('Comparison of derivative computations')
plt.legend()
plt.grid()
plt.show()

# We look for the zeros of g(x)
x0, y0 = ex.ceros(-3, 3, g)
print(x0)

# We test our derivative functions with different array sizes and dx
x = np.linspace(-np.pi, np.pi, 10000)
y = g(x)
dydx = ex.derivadavec(x, y)
d2ydx2 = ex.derivadavec(x, dydx)
x0, y0 = ex.ceros(-5, 5, g, x[1]-x[0])

plt.plot(x, y, color='gray', label='g(x)', zorder=0)
plt.plot(x, dydx, color='red', label='g´(x)', zorder=1)
plt.plot(x, d2ydx2, color='blue', label='g´´(x)', zorder=2)
plt.plot(x0, y0, marker='*', color='green', label='Zero of g', linestyle='', zorder=3)
plt.xlabel('x')
plt.ylabel('y')
plt.title('Function and its derivatives')
plt.legend()
plt.grid()
plt.show()

# We include the computation of the x values where f(x)=0
x1, y1 = ex.cerof(x, dydx, x[1]-x[0])

# We test our derivative functions with different array sizes and dx
x = np.linspace(-np.pi, np.pi, 10000)
y = g(x)
dydx = ex.derivadavec(x, y)
d2ydx2 = ex.derivadavec(x, dydx)
plt.plot(x, y, color='gray', label='g(x)', zorder=0)
plt.plot(x, dydx, color='red', label='g´(x)', zorder=1)
plt.plot(x, d2ydx2, color='blue', label='g´´(x)', zorder=2)
plt.plot(x1, y1, marker='*', color='green', label='Zero of g', linestyle='', zorder=3)
plt.xlabel('x')
plt.ylabel('y')
plt.title('Function and its derivatives')
plt.legend()
plt.grid()
plt.show()
```

How could we find the value of f''(x) at the x values we found?

## Exercises

**1.-** Try writing code for the Secant Method and the Newton-Raphson Method and apply it to one of the functions seen above.

**2.-** **a)** In connection with the problem of finding $x\in[a,b]$ such that $f(x)\ =\ 0$, look up (for example, on Wikipedia) information about *Householder's Method*. Note that the Newton-Raphson method is one of these schemes, but that in some cases higher-order derivatives are used. Try writing an algorithm for one of those methods (you can even write an algorithm that allows using any of them), and apply it to the function

$$f(x)\ =\ \frac{e^{-x^2}-x^3}{(x+1)^2+(x-1)^2}$$

To do so, plot this function on some interval where it is known to vanish. You can help yourself with a grid, by writing

```Python
plt.grid() # To display the grid
plt.show() # To show the plot
```

and take an initial value $x_0$ that visually lies close to the solution.

**b)** Do the same as before, looking up information about *Halley's Method*.

**3.-** Use the notebook and any of the methods seen above or defined in class to study the following functions:

<ol style="list-style-type:lower-alpha">
<li>$\qquad f(x) = x^p,\quad p\in\mathbb{R}$. Try different values of $p$ (distinguish between $p\ge0$ and $p<0$) <br><br></li>
<li>$\qquad g(x) = \frac{x}{\sqrt{x^2+1}}$ <br><br></li>
<li>$\qquad h(x) = \frac{\sin^2(x)}{x},\quad x\neq0$</li>
</ol>

**4.-** Try to program an algorithm to find the minima and maxima of a function $f$, if it has any.
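As a starting point for Exercise 1, here is a minimal sketch of the Newton-Raphson iteration from Section 4.2. It approximates $f'$ with a centered finite difference instead of the helpers in `experimento5`, so take it as an illustration rather than the intended course solution.

```
def newton_raphson(f, x0, tol=1e-8, max_iter=100, dx=1e-6):
    # Iterate x_n = x_{n-1} - f(x_{n-1}) / f'(x_{n-1}) until the update is below tol
    x = x0
    for _ in range(max_iter):
        dfdx = (f(x + dx) - f(x - dx)) / (2 * dx)  # centered finite difference for f'
        if dfdx == 0:
            raise ZeroDivisionError("f'(x) vanished; Newton-Raphson cannot continue.")
        x_new = x - f(x) / dfdx
        if abs(x_new - x) < tol:
            return x_new
        x = x_new
    return x

# Example with the function g defined earlier in this notebook
print(newton_raphson(g, 0.5))
```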
github_jupyter
# Results summary | Logistic Regression | LightGBM Classifier | Logistic Regression + ATgfe | |-------------------------------------------------------------------------|------------------------------------------------------------------------|--------------------------------------------------------------------| | <ul> <li>10-CV Accuracy: 0.926</li><li>Test-data Accuracy: 0.911</li><li>ROC_AUC: 0.99</li> </ul> | <ul> <li>10-CV Accuracy: 0.946</li><li>Test-data Accuracy: 0.977</li><li>ROC_AUC: 1.0</li> </ul> | <ul> <li>10-CV Accuracy: **0.98**</li><li>Test-data Accuracy: **1.0**</li><li>ROC_AUC: **1.0**</li> </ul> | # Import packages ``` from atgfe.GeneticFeatureEngineer import GeneticFeatureEngineer import pandas as pd import numpy as np from sklearn.model_selection import train_test_split, cross_val_score from sklearn.linear_model import LogisticRegression from sklearn.preprocessing import StandardScaler, OneHotEncoder from sklearn.pipeline import make_pipeline from sklearn.compose import make_column_transformer from sklearn.metrics import accuracy_score, make_scorer, balanced_accuracy_score, recall_score from yellowbrick.classifier import ClassificationReport, ConfusionMatrix, ROCAUC, PrecisionRecallCurve from lightgbm import LGBMClassifier from sklearn import datasets def prepare_column_names(columns): return [col.replace(' ', '').replace('(cm)', '_cm') for col in columns] sklearn_data = datasets.load_iris() columns = prepare_column_names(sklearn_data.feature_names) df = pd.DataFrame(data=sklearn_data.data, columns=columns) df['class'] = sklearn_data.target df['class'] = df['class'].astype(str) df.head() target = 'class' X = df.drop(target, axis=1).copy() Y = df.loc[:, target].copy() X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.3, random_state=42) classes = ['setosa', 'versicolor', 'virginica'] numerical_features = X.columns.tolist() def classification_report(model): visualizer = ClassificationReport(model, classes=classes, support=True) visualizer.fit(X_train, y_train) visualizer.score(X_test, y_test) visualizer.poof() def roc_auc(model): visualizer = ROCAUC(model, classes=classes) visualizer.fit(X_train, y_train) visualizer.score(X_test, y_test) visualizer.poof() def confusion_matrix(model): visualizer = ConfusionMatrix(model, classes=classes) visualizer.fit(X_train, y_train) visualizer.score(X_test, y_test) visualizer.poof() def precision_recall_curve(model): visualizer = PrecisionRecallCurve(model) visualizer.fit(X_train, y_train) visualizer.score(X_test, y_test) visualizer.poof() def score_model(model, X, y): evaluation_metric_scorer = make_scorer(balanced_accuracy_score, greater_is_better=True) scores = cross_val_score(estimator=model, X=X, y=y, cv=10, scoring=evaluation_metric_scorer, n_jobs=-1) scores_mean = scores.mean() score_std = scores.std() print('Mean of metric: {}, std: {}'.format(scores_mean, score_std)) def score_test_data_for_model(model, X_test, y_test): model.fit(X_train, y_train) y_pred = model.predict(X_test) print('Balanced Accuracy: {}'.format(balanced_accuracy_score(y_test, y_pred))) print('Accuracy: {}'.format(accuracy_score(y_test, y_pred))) def create_new_model(): model = make_pipeline(StandardScaler(), LogisticRegression(random_state=77, n_jobs=-1, solver='saga')) return model ``` # Using LightGBM ``` lgbm_model = LGBMClassifier(n_estimators=100, random_state=7) score_model(lgbm_model, X, Y) classification_report(lgbm_model) confusion_matrix(lgbm_model) precision_recall_curve(lgbm_model) roc_auc(lgbm_model) lgbm_model.fit(X_train, 
y_train) score_test_data_for_model(lgbm_model, X_test, y_test) ``` # Using Logistic Regression ``` model = create_new_model() score_model(model, X, Y) classification_report(model) confusion_matrix(model) precision_recall_curve(model) roc_auc(model) score_test_data_for_model(model, X_test, y_test) ``` # Using ATgfe ``` model = create_new_model() def micro_recall_score(y_true, y_pred): return recall_score(y_true, y_pred, average='micro') gfe = GeneticFeatureEngineer(model, x_train=X_train, y_train=y_train, numerical_features=numerical_features, number_of_candidate_features=2, number_of_interacting_features=4, evaluation_metric=micro_recall_score, minimize_metric=False, enable_weights=True, n_jobs=62, cross_validation_in_objective_func=True, objective_func_cv=3) gfe.fit(mu=10, lambda_=120, early_stopping_patience=5, mutation_probability=0.4, crossover_probability=0.6) ``` # Apply GFE ``` new_X = gfe.transform(X) new_X.head(20) model = create_new_model() score_model(model, new_X, Y) X_train, X_test, y_train, y_test = train_test_split(new_X, Y, test_size=0.3, random_state=42) classification_report(model) confusion_matrix(model) precision_recall_curve(model) roc_auc(model) score_test_data_for_model(model, X_test, y_test) ```
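To see exactly which columns ATgfe engineered on top of the four raw measurements, the transformed frame can be compared with the original feature list; a small illustrative check (not in the original notebook):

```
engineered_columns = [col for col in new_X.columns if col not in numerical_features]
print('Number of engineered features:', len(engineered_columns))
for col in engineered_columns:
    print(col)
```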
github_jupyter
``` import h2o h2o.init(max_mem_size = 2) #uses all cores by default h2o.remove_all() %matplotlib inline import matplotlib.pyplot as plt import numpy as np import pandas as pd from h2o.estimators.deeplearning import H2ODeepLearningEstimator higgs = h2o.import_file('higgs_boston_train.csv') higgs.head() higgs.shape higgs_df = higgs.as_data_frame(use_pandas=True) higgs_df['Label'].value_counts() higgs.describe() train, valid, test = higgs.split_frame([0.6, 0.2], seed = 2019) higgs_X = higgs.col_names[1: -1] higgs_y = higgs.col_names[-1] higgs_model_v1 = H2ODeepLearningEstimator(model_id = 'higgs_v1', epochs = 1, variable_importances = True) higgs_model_v1.train(higgs_X, higgs_y, training_frame = train, validation_frame = valid) print(higgs_model_v1) var_df = pd.DataFrame(higgs_model_v1.varimp(), columns = ['Variable', 'Relative Importance', 'Scaled Importance', 'Percentage']) print(var_df.shape) var_df.head(10) higgs_v1_df = higgs_model_v1.score_history() higgs_v1_df plt.plot(higgs_v1_df['training_classification_error'], label="training_classification_error") plt.plot(higgs_v1_df['validation_classification_error'], label="validation_classification_error") plt.title("Higgs Deep Learner") plt.legend(); pred = higgs_model_v1.predict(test[1:-1]).as_data_frame(use_pandas=True) test_actual = test.as_data_frame(use_pandas=True)['Label'] (test_actual == pred['predict']).mean() higgs_model_v2 = H2ODeepLearningEstimator(model_id = 'higgs_v2', hidden = [32, 32, 32], epochs = 1000000, score_validation_samples = 10000, stopping_rounds = 2, stopping_metric = 'misclassification', stopping_tolerance = 0.01) higgs_model_v2.train(higgs_X, higgs_y, training_frame = train, validation_frame = valid) higgs_v2_df = higgs_model_v2.score_history() higgs_v2_df plt.plot(higgs_v2_df['training_classification_error'], label="training_classification_error") plt.plot(higgs_v2_df['validation_classification_error'], label="validation_classification_error") plt.title("Higgs Deep Learner (Early Stop)") plt.legend(); pred = higgs_model_v2.predict(test[1:-1]).as_data_frame(use_pandas=True) test_actual = test.as_data_frame(use_pandas=True)['Label'] (test_actual == pred['predict']).mean() higgs_model_v2.varimp_plot(); from h2o.automl import H2OAutoML aml = H2OAutoML(max_models = 10, max_runtime_secs=100, seed = 1) aml.train(higgs_X, higgs_y, training_frame = train, validation_frame = valid) aml.leaderboard ``` AutoML has built 5 models inlcuding GLM, DRF (Distributed Random Forest) and XRT (Extremely Randomized Trees) and two stacked ensemble models (the 2nd and 3rd) and the best model is XRT. It turns out, my proud deep learning models are not even on the leaderboard.
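The leaderboard only ranks the candidates; to actually use the winning model you can pull it out of the AutoML object. The calls below are standard H2O methods and are shown as an optional follow-up, not as part of the original run.

```
# Best model found by AutoML (ranked by the default metric for the problem type)
leader = aml.leader
print(leader.model_id)

# Evaluate the leader on the held-out test frame
perf = leader.model_performance(test)
print(perf.auc())

# Score the test rows the same way as the deep learning models above
pred = leader.predict(test[1:-1]).as_data_frame(use_pandas=True)
test_actual = test.as_data_frame(use_pandas=True)['Label']
print((test_actual == pred['predict']).mean())

# Persist the model to disk if needed
model_path = h2o.save_model(model=leader, path='./automl_leader', force=True)
print(model_path)
```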
github_jupyter
# Objective * 20181225: * Predict stock price in next day using XGBoost * Given prices and other features for the last N days, we do prediction for day N+1 * Here we split 3 years of data into train(60%), dev(20%) and test(20%) * 20190110 - Diff from StockPricePrediction_v1_xgboost.ipynb: * Here we scale the train set to have mean 0 and variance 1, and apply the same transformation to dev and test sets * 20190111 - Diff from StockPricePrediction_v1a_xgboost.ipynb: * Here for the past N values for the dev set, we scale them to have mean 0 and variance 1, and do prediction on them ``` import math import numpy as np import pandas as pd import seaborn as sns import time from matplotlib import pyplot as plt from pylab import rcParams from sklearn.metrics import mean_squared_error from sklearn.preprocessing import StandardScaler from tqdm import tqdm_notebook from xgboost import XGBRegressor %matplotlib inline #### Input params ################## stk_path = "./data/VTI.csv" test_size = 0.2 # proportion of dataset to be used as test set cv_size = 0.2 # proportion of dataset to be used as cross-validation set N = 7 # for feature at day t, we use lags from t-1, t-2, ..., t-N as features n_estimators = 100 # for the initial model before tuning. default = 100 max_depth = 3 # for the initial model before tuning. default = 3 learning_rate = 0.1 # for the initial model before tuning. default = 0.1 min_child_weight = 1 # for the initial model before tuning. default = 1 subsample = 1 # for the initial model before tuning. default = 1 colsample_bytree = 1 # for the initial model before tuning. default = 1 colsample_bylevel = 1 # for the initial model before tuning. default = 1 train_test_split_seed = 111 # 111 model_seed = 100 fontsize = 14 ticklabelsize = 14 #################################### ``` # Load data ``` df = pd.read_csv(stk_path, sep = ",") # Convert Date column to datetime df.loc[:, 'Date'] = pd.to_datetime(df['Date'],format='%Y-%m-%d') # Change all column headings to be lower case, and remove spacing df.columns = [str(x).lower().replace(' ', '_') for x in df.columns] # Get month of each sample df['month'] = df['date'].dt.month # Sort by datetime df.sort_values(by='date', inplace=True, ascending=True) df.head() # Plot adjusted close over time rcParams['figure.figsize'] = 10, 8 # width 10, height 8 ax = df.plot(x='date', y='adj_close', style='b-', grid=True) ax.set_xlabel("date") ax.set_ylabel("USD") ``` # Split into train, dev and test set ``` # Get sizes of each of the datasets num_cv = int(cv_size*len(df)) num_test = int(test_size*len(df)) num_train = len(df) - num_cv - num_test print("num_train = " + str(num_train)) print("num_cv = " + str(num_cv)) print("num_test = " + str(num_test)) # Split into train, cv, and test train = df[:num_train] cv = df[num_train:num_train+num_cv] train_cv = df[:num_train+num_cv] test = df[num_train+num_cv:] print("train.shape = " + str(train.shape)) print("cv.shape = " + str(cv.shape)) print("train_cv.shape = " + str(train_cv.shape)) print("test.shape = " + str(test.shape)) ``` # Scale the train, dev and test set and combine them to do feature engineering ``` # Converting dataset into x_train and y_train # Here we only scale the train dataset, and not the entire dataset to prevent information leak scaler = StandardScaler() train_scaled = scaler.fit_transform(train[['open', 'high', 'low', 'close', 'adj_close', 'volume']]) print("scaler.mean_ = " + str(scaler.mean_)) print("scaler.var_ = " + str(scaler.var_)) print("train_scaled.shape = " + 
str(train_scaled.shape)) # Convert the numpy array back into pandas dataframe train_scaled = pd.DataFrame(train_scaled, columns=['open', 'high', 'low', 'close', 'adj_close', 'volume']) train_scaled[['date', 'month']] = train[['date', 'month']] print("train_scaled.shape = " + str(train_scaled.shape)) train_scaled.head() # Do scaling for dev set cv_scaled = scaler.transform(cv[['open', 'high', 'low', 'close', 'adj_close', 'volume']]) # Convert the numpy array back into pandas dataframe cv_scaled = pd.DataFrame(cv_scaled, columns=['open', 'high', 'low', 'close', 'adj_close', 'volume']) cv_scaled[['date', 'month']] = cv.reset_index()[['date', 'month']] print("cv_scaled.shape = " + str(cv_scaled.shape)) cv_scaled.head() # Do scaling for test set test_scaled = scaler.transform(test[['open', 'high', 'low', 'close', 'adj_close', 'volume']]) # Convert the numpy array back into pandas dataframe test_scaled = pd.DataFrame(test_scaled, columns=['open', 'high', 'low', 'close', 'adj_close', 'volume']) test_scaled[['date', 'month']] = test.reset_index()[['date', 'month']] print("test_scaled.shape = " + str(test_scaled.shape)) test_scaled.head() # Combine back train_scaled, cv_scaled, test_scaled together df_scaled = pd.concat([train_scaled, cv_scaled, test_scaled], axis=0) df_scaled.head() ``` # Feature Engineering We will generate the following features: * Mean 'adj_close' of each month * Difference between high and low of each day * Difference between open and close of each day * Mean volume of each month ``` # Get difference between high and low of each day df_scaled['range_hl'] = df_scaled['high'] - df_scaled['low'] df_scaled.drop(['high', 'low'], axis=1, inplace=True) # Get difference between open and close of each day df_scaled['range_oc'] = df_scaled['open'] - df_scaled['close'] df_scaled.drop(['open', 'close'], axis=1, inplace=True) df_scaled.head() ``` Now we use lags up to N number of days to use as features. ``` # Add a column 'order_day' to indicate the order of the rows by date df_scaled['order_day'] = [x for x in list(range(len(df_scaled)))] # merging_keys merging_keys = ['order_day'] # List of columns that we will use to create lags lag_cols = ['adj_close', 'range_hl', 'range_oc', 'volume'] lag_cols shift_range = [x+1 for x in range(N)] for shift in tqdm_notebook(shift_range): train_shift = df_scaled[merging_keys + lag_cols].copy() # E.g. order_day of 0 becomes 1, for shift = 1. # So when this is merged with order_day of 1 in df_scaled, this will represent lag of 1. train_shift['order_day'] = train_shift['order_day'] + shift foo = lambda x: '{}_lag_{}'.format(x, shift) if x in lag_cols else x train_shift = train_shift.rename(columns=foo) df_scaled = pd.merge(df_scaled, train_shift, on=merging_keys, how='left') #.fillna(0) del train_shift # Remove the first N rows which contain NaNs df_scaled = df_scaled[N:] df_scaled.head() df_scaled.info() # # Get mean of adj_close of each month # df_gb = df.groupby(['month'], as_index=False).agg({'adj_close':'mean'}) # df_gb = df_gb.rename(columns={'adj_close':'adj_close_mean'}) # df_gb # # Merge to main df # df = df.merge(df_gb, # left_on=['month'], # right_on=['month'], # how='left').fillna(0) # # Merge to main df # shift_range = [x+1 for x in range(2)] # for shift in tqdm_notebook(shift_range): # train_shift = df[merging_keys + lag_cols].copy() # # E.g. order_day of 0 becomes 1, for shift = 1. # # So when this is merged with order_day of 1 in df, this will represent lag of 1. 
# train_shift['order_day'] = train_shift['order_day'] + shift # foo = lambda x: '{}_lag_{}'.format(x, shift) if x in lag_cols else x # train_shift = train_shift.rename(columns=foo) # df = pd.merge(df, train_shift, on=merging_keys, how='left') #.fillna(0) # del train_shift # df # # Get mean of volume of each month # df_gb = df.groupby(['month'], as_index=False).agg({'volume':'mean'}) # df_gb = df_gb.rename(columns={'volume':'volume_mean'}) # df_gb # # Merge to main df # df = df.merge(df_gb, # left_on=['month'], # right_on=['month'], # how='left').fillna(0) # df.head() ``` # Split the scaled features back into train, dev and test set ``` features = [ "adj_close_lag_1", "range_hl_lag_1", "range_oc_lag_1", "volume_lag_1", "adj_close_lag_2", "range_hl_lag_2", "range_oc_lag_2", "volume_lag_2", "adj_close_lag_3", "range_hl_lag_3", "range_oc_lag_3", "volume_lag_3", "adj_close_lag_4", "range_hl_lag_4", "range_oc_lag_4", "volume_lag_4", "adj_close_lag_5", "range_hl_lag_5", "range_oc_lag_5", "volume_lag_5", "adj_close_lag_6", "range_hl_lag_6", "range_oc_lag_6", "volume_lag_6", "adj_close_lag_7", "range_hl_lag_7", "range_oc_lag_7", "volume_lag_7" ] target = "adj_close" # Split into train, cv, and test train = df_scaled[:num_train] cv = df_scaled[num_train:num_train+num_cv] train_cv = df_scaled[:num_train+num_cv] test = df_scaled[num_train+num_cv:] # Split into X and y X_train = train[features] y_train = train[target] X_cv = cv[features] y_cv = cv[target] X_train_cv = train_cv[features] y_train_cv = train_cv[target] X_sample = test[features] y_sample = test[target] print("X_train.shape = " + str(X_train.shape)) print("y_train.shape = " + str(y_train.shape)) print("X_cv.shape = " + str(X_cv.shape)) print("y_cv.shape = " + str(y_cv.shape)) print("X_train_cv.shape = " + str(X_train_cv.shape)) print("y_train_cv.shape = " + str(y_train_cv.shape)) print("X_sample.shape = " + str(X_sample.shape)) print("y_sample.shape = " + str(y_sample.shape)) ``` # EDA ``` # Plot adjusted close over time rcParams['figure.figsize'] = 10, 8 # width 10, height 8 ax = train.plot(x='date', y='adj_close', style='b-', grid=True) ax = cv.plot(x='date', y='adj_close', style='y-', grid=True, ax=ax) ax = test.plot(x='date', y='adj_close', style='g-', grid=True, ax=ax) ax.legend(['train', 'dev', 'test']) ax.set_xlabel("date") ax.set_ylabel("USD (scaled)") ``` # Train the model using XGBoost ``` # Create the model model = XGBRegressor(seed=model_seed, n_estimators=n_estimators, max_depth=max_depth, learning_rate=learning_rate, min_child_weight=min_child_weight) # Train the regressor model.fit(X_train, y_train) ``` # Predict on train set ``` # Do prediction on train set est = model.predict(X_train) # Calculate RMSE math.sqrt(mean_squared_error(y_train, est)) # Plot adjusted close over time rcParams['figure.figsize'] = 10, 8 # width 10, height 8 est_df = pd.DataFrame({'est': est, 'date': train['date']}) ax = train.plot(x='date', y='adj_close', style='b-', grid=True) ax = cv.plot(x='date', y='adj_close', style='y-', grid=True, ax=ax) ax = test.plot(x='date', y='adj_close', style='g-', grid=True, ax=ax) ax = est_df.plot(x='date', y='est', style='r-', grid=True, ax=ax) ax.legend(['train', 'dev', 'test', 'est']) ax.set_xlabel("date") ax.set_ylabel("USD (scaled)") ``` # Predict on dev set ``` # Do prediction on test set est = model.predict(X_cv) # Calculate RMSE math.sqrt(mean_squared_error(y_cv, est)) # Plot adjusted close over time rcParams['figure.figsize'] = 10, 8 # width 10, height 8 est_df = pd.DataFrame({'est': est, 'y_cv': y_cv, 
'date': cv['date']})

ax = train.plot(x='date', y='adj_close', style='b-', grid=True)
ax = cv.plot(x='date', y='adj_close', style='y-', grid=True, ax=ax)
ax = test.plot(x='date', y='adj_close', style='g-', grid=True, ax=ax)
ax = est_df.plot(x='date', y='est', style='r-', grid=True, ax=ax)
ax.legend(['train', 'dev', 'test', 'est'])
ax.set_xlabel("date")
ax.set_ylabel("USD (scaled)")
```

# Findings
* The predictions on the dev set are poor
* Likely because the model was only ever trained on scaled prices below ~1.7, and tree-based models cannot extrapolate, so when it saw prices above 1.7 in the dev set it could not generalize well
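To make the extrapolation point concrete, below is a small side experiment (not part of the original pipeline; the data and variable names are made up for illustration). A tree-based regressor such as XGBoost predicts an almost constant value once the input moves outside the range seen during training:

```
# Side experiment: tree ensembles cannot extrapolate a trend
import numpy as np
from xgboost import XGBRegressor

X_demo = np.arange(0, 100, dtype=float).reshape(-1, 1)
y_demo = X_demo.ravel()                    # a simple upward trend, y = x

demo_model = XGBRegressor(n_estimators=50, max_depth=3)
demo_model.fit(X_demo, y_demo)

# Inside the training range the prediction follows the trend, but far outside it
# the prediction saturates near the largest target seen in training (~99).
print(demo_model.predict(np.array([[50.0], [150.0], [500.0]])))
```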
github_jupyter
## In this notebook: - Using a pre-trained convnet to do feature extraction - Use ConvBase only for feature extraction, and use a separate machine learning classifier - Adding ```Dense``` layers to top of a frozen ConvBase, allowing us to leverage data augmentation - Fine-tuning a pre-trained convnet (Skipped, because I am tired now) ### In previous notebook: - Training your own small convnets from scratch - Using data augmentation to mitigate overfitting ``` from datetime import date date.today() author = "NirantK. https://github.com/NirantK/keras-practice" print(author) import keras print('Keras Version:', keras.__version__) import os if os.name=='nt': print('We are on Windows') import os, shutil pwd = os.getcwd() ``` Feature extraction --- This consists of using the representations learned by a previous network to extract interesting features from new samples. These features are then run through a new classifier, which is trained from scratch. ![](https://dpzbhybb2pdcj.cloudfront.net/chollet/v-6/Figures/swapping_fc_classifier.png) Warning: The line below triggers a download. You need good speed Internet! ``` from keras.applications import VGG16 conv_base = VGG16(weights='imagenet', include_top=False, input_shape=(150, 150, 3)) ``` We passed three arguments to the constructor: - **```weights```**, to specify which weight checkpoint to initialize the model from - **```include_top```**, which refers to including or not the densely-connected classifier on top of the network. By default, this densely-connected classifier would correspond to the 1000 classes from ImageNet. Since we intend to use our own densely-connected classifier (with only two classes, cat and dog), we don’t need to include it. - **```input_shape```**, the shape of the image tensors that we will feed to the network. This argument is purely optional: if we don’t pass it, then the network will be able to process inputs of any size. (from *Deep Learning in Python by F. Chollet*) What does the **VGG16** thing look like? ``` conv_base.summary() ``` Feature Extraction --- Pros: - Fast, and cheap - Works on CPU Cons: - Does not allow us to use data augmentation - Because we do feature extraction and classification in separate steps ``` import os import numpy as np from keras.preprocessing.image import ImageDataGenerator base_dir = os.path.join(pwd, 'data/cats_and_dogs_small/') train_dir = os.path.join(base_dir, 'train') validation_dir = os.path.join(base_dir, 'validation') test_dir = os.path.join(base_dir, 'test') datagen = ImageDataGenerator(rescale=1./255) batch_size = 1 def extract_features(directory, sample_count): features = np.zeros(shape=(sample_count, 4, 4, 512)) labels = np.zeros(shape=(sample_count)) generator = datagen.flow_from_directory( directory, target_size=(150, 150), batch_size=batch_size, class_mode='binary') i = 0 for inputs_batch, labels_batch in generator: features_batch = conv_base.predict(inputs_batch) try: features[i * batch_size : (i + 1) * batch_size] = features_batch except ValueError: print(i) raise ValueError labels[i * batch_size : (i + 1) * batch_size] = labels_batch i += 1 if i * batch_size >= sample_count: # Note that since generators yield data indefinitely in a loop, # we must `break` after every image has been seen once. 
break return features, labels %time train_features, train_labels = extract_features(train_dir, 2000) %time validation_features, validation_labels = extract_features(validation_dir, 1000) %time test_features, test_labels = extract_features(test_dir, 1000) train_features = np.reshape(train_features, (2000, 4 * 4 * 512)) validation_features = np.reshape(validation_features, (1000, 4 * 4 * 512)) test_features = np.reshape(test_features, (1000, 4 * 4 * 512)) ``` **Model Training:** ``` from keras import models from keras import layers from keras import optimizers model = models.Sequential() model.add(layers.Dense(256, activation='relu', input_dim=4 * 4 * 512)) model.add(layers.Dropout(0.5)) model.add(layers.Dense(1, activation='sigmoid')) model.compile(optimizer=optimizers.RMSprop(lr=2e-5), loss='binary_crossentropy', metrics=['acc']) %time history = model.fit(train_features, train_labels, epochs=15, batch_size=20, validation_data=(validation_features, validation_labels)) model.save('cats_and_dogs_small_feature_extraction.h5') import matplotlib.pyplot as plt acc = history.history['acc'] val_acc = history.history['val_acc'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(1, len(acc) + 1) plt.plot(epochs, acc, 'bo', label='Training acc') plt.plot(epochs, val_acc, 'b', label='Validation acc') plt.title('Training and validation accuracy') plt.legend() plt.figure() plt.plot(epochs, loss, 'bo', label='Training loss') plt.plot(epochs, val_loss, 'b', label='Validation loss') plt.title('Training and validation loss') plt.legend() plt.show() ``` This is Overfitting! --- We can see that the training and validation accuracy curve diverge from each other rather quickly. This alone is might not be a sure shot sign of overfitting. We also observe that the training loss drops smoothly while validation loss actually increases. These two graphs in conjunction with each other indicate overfittig. **Why did this overfit despite dropout?** We did *NOT* do data augmentation Extending the ConvBase Model! --- Pros: - Better performance (accuracy) - Better Generalization (less overfitting) - Because we can use data augmentation Cons: - Expensive compute **Warning: Do not attempt this without a GPU. Your Python process can/will crash after a few hours** ``` from keras import models from keras import layers model = models.Sequential() model.add(conv_base) model.add(layers.Flatten()) model.add(layers.Dense(256, activation='relu')) model.add(layers.Dense(1, activation='sigmoid')) model.summary() ``` ### Freezing ConvBase model: VGG16 Freezing means we do not update the layer weights in those particular layers. This is important for our present application. ``` print('This is the number of trainable weights ' 'before freezing the conv base:', len(model.trainable_weights)) conv_base.trainable = False print('This is the number of trainable weights ' 'after freezing the conv base:', len(model.trainable_weights)) model.summary() # compare the Trainable Params value from the previous model summary from keras.preprocessing.image import ImageDataGenerator # Note that the validation data should not be augmented! 
test_datagen = ImageDataGenerator(rescale=1./255) train_datagen = ImageDataGenerator( rescale=1./255, rotation_range=40, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode='nearest') train_generator = train_datagen.flow_from_directory( # This is the target directory train_dir, # All images will be resized to 150x150 target_size=(150, 150), batch_size=20, # Since we use binary_crossentropy loss, we need binary labels class_mode='binary') validation_generator = test_datagen.flow_from_directory( validation_dir, target_size=(150, 150), batch_size=20, class_mode='binary') model.compile(loss='binary_crossentropy', optimizer=optimizers.RMSprop(lr=2e-5), metrics=['acc']) history = model.fit_generator( train_generator, steps_per_epoch=100, epochs=30, validation_data=validation_generator, validation_steps=50) acc = history.history['acc'] val_acc = history.history['val_acc'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(1, len(acc) + 1) plt.plot(epochs, acc, 'bo', label='Training acc') plt.plot(epochs, val_acc, 'b', label='Validation acc') plt.title('Training and validation accuracy') plt.legend() plt.figure() plt.plot(epochs, loss, 'bo', label='Training loss') plt.plot(epochs, val_loss, 'b', label='Validation loss') plt.title('Training and validation loss') plt.legend() plt.show() # just for reference, let's calculate the test accuracy test_generator = test_datagen.flow_from_directory( test_dir, target_size=(150, 150), batch_size=20, class_mode='binary') %time test_loss, test_acc = model.evaluate_generator(test_generator, steps=50) print('test acc:', test_acc) ```
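The notebook outline lists fine-tuning as a third option but skips it. For completeness, here is a rough sketch of what that step would look like with the objects defined above. This is an untested sketch, not the author's code; it assumes the VGG16 layer name 'block5_conv1' and the same old-style Keras API used throughout this notebook.

```
# Sketch of fine-tuning: unfreeze only the last convolutional block of VGG16
conv_base.trainable = True

set_trainable = False
for layer in conv_base.layers:
    if layer.name == 'block5_conv1':
        set_trainable = True
    layer.trainable = set_trainable   # block5 layers become trainable, the rest stay frozen

# Re-compile with a very small learning rate so the pre-trained weights are only nudged
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=1e-5),
              metrics=['acc'])

history = model.fit_generator(
      train_generator,
      steps_per_epoch=100,
      epochs=30,
      validation_data=validation_generator,
      validation_steps=50)
```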
github_jupyter
# Gradient Descent Optimizations Mini-batch and stochastic gradient descent is widely used in deep learning, where the large number of parameters and limited memory make the use of more sophisticated optimization methods impractical. Many methods have been proposed to accelerate gradient descent in this context, and here we sketch the ideas behind some of the most popular algorithms. ``` %matplotlib inline import matplotlib.pyplot as plt import numpy as np ``` ## Smoothing with exponentially weighted averages ``` n = 50 x = np.arange(n) * np.pi y = np.cos(x) * np.exp(x/100) - 10*np.exp(-0.01*x) ``` ### Exponentially weighted average The exponentially weighted average adds a fraction $\beta$ of the current value to a leaky running sum of past values. Effectively, the contribution from the $t-n$th value is scaled by $$ \beta^n(1 - \beta) $$ For example, here are the contributions to the current value after 5 iterations (iteration 5 is the current iteration) | iteration | contribution | | --- | --- | | 1 | $\beta^4(1 - \beta)$ | | 2 | $\beta^3(1 - \beta)$ | | 3 | $\beta^2(1 - \beta)$ | | 4 | $\beta^1(1 - \beta)$ | | 5 | $(1 - \beta)$ | Since $\beta \lt 1$, the contribution decreases exponentially with the passage of time. Effectively, this acts as a smoother for a function. ``` def ewa(y, beta): """Exponentially weighted average.""" n = len(y) zs = np.zeros(n) z = 0 for i in range(n): z = beta*z + (1 - beta)*y[i] zs[i] = z return zs ``` ### Exponentially weighted average with bias correction Since the EWA starts from 0, there is an initial bias. This can be corrected by scaling with $$ \frac{1}{1 - \beta^t} $$ where $t$ is the iteration number. ``` def ewabc(y, beta): """Exponentially weighted average with bias correction.""" n = len(y) zs = np.zeros(n) z = 0 for i in range(n): z = beta*z + (1 - beta)*y[i] zc = z/(1 - beta**(i+1)) zs[i] = zc return zs beta = 0.9 plt.plot(x, y, 'o-') plt.plot(x, ewa(y, beta), c='red', label='EWA') plt.plot(x, ewabc(y, beta), c='orange', label='EWA with bias correction') plt.legend() pass ``` ## Momentum in 1D Momentum comes from physics, where the contribution of the gradient is to the velocity, not the position. Hence we create an accessory variable $v$ and increment it with the gradient. The position is then updated with the velocity in place of the gradient. The analogy is that we can think of the parameter $x$ as a particle in an energy well with potential energy $U = mgh$ where $h$ is given by our objective function $f$. The force generated is a function of the rat of change of potential energy $F \propto \nabla U \propto \nabla f$, and we use $F = ma$ to get that the acceleration $a \propto \nabla f$. Finally, we integrate $a$ over time to get the velocity $v$ and integrate $v$ to get the displacement $x$. Note that we need to damp the velocity otherwise the particle would just oscillate forever. We use a version of the update that simply treats the velocity as an exponentially weighted average popularized by Andrew Ng in his Coursera course. This is the same as the momentum scheme motivated by physics with some rescaling of constants. 
``` def f(x): return x**2 def grad(x): return 2*x def gd(x, grad, alpha, max_iter=10): xs = np.zeros(1 + max_iter) xs[0] = x for i in range(max_iter): x = x - alpha * grad(x) xs[i+1] = x return xs def gd_momentum(x, grad, alpha, beta=0.9, max_iter=10): xs = np.zeros(1 + max_iter) xs[0] = x v = 0 for i in range(max_iter): v = beta*v + (1-beta)*grad(x) vc = v/(1+beta**(i+1)) x = x - alpha * vc xs[i+1] = x return xs ``` ### Gradient descent with moderate step size ``` alpha = 0.1 x0 = 1 xs = gd(x0, grad, alpha) xp = np.linspace(-1.2, 1.2, 100) plt.plot(xp, f(xp)) plt.plot(xs, f(xs), 'o-', c='red') for i, (x, y) in enumerate(zip(xs, f(xs)), 1): plt.text(x, y+0.2, i, bbox=dict(facecolor='yellow', alpha=0.5), fontsize=14) pass ``` ### Gradient descent with large step size When the step size is too large, gradient descent can oscillate and even diverge. ``` alpha = 0.95 xs = gd(1, grad, alpha) xp = np.linspace(-1.2, 1.2, 100) plt.plot(xp, f(xp)) plt.plot(xs, f(xs), 'o-', c='red') for i, (x, y) in enumerate(zip(xs, f(xs)), 1): plt.text(x*1.2, y, i, bbox=dict(facecolor='yellow', alpha=0.5), fontsize=14) pass ``` ### Gradient descent with momentum Momentum results in cancellation of gradient changes in opposite directions, and hence damps out oscillations while amplifying consistent changes in the same direction. This is perhaps clearer in the 2D example below. ``` alpha = 0.95 xs = gd_momentum(1, grad, alpha, beta=0.9) xp = np.linspace(-1.2, 1.2, 100) plt.plot(xp, f(xp)) plt.plot(xs, f(xs), 'o-', c='red') for i, (x, y) in enumerate(zip(xs, f(xs)), 1): plt.text(x, y+0.2, i, bbox=dict(facecolor='yellow', alpha=0.5), fontsize=14) pass ``` ## Momentum and RMSprop in 2D ``` def f2(x): return x[0]**2 + 100*x[1]**2 def grad2(x): return np.array([2*x[0], 200*x[1]]) x = np.linspace(-1.2, 1.2, 100) y = np.linspace(-1.2, 1.2, 100) X, Y = np.meshgrid(x, y) levels = [0.1,1,2,4,9, 16, 25, 36, 49, 64, 81, 100] Z = x**2 + 100*Y**2 c = plt.contour(X, Y, Z, levels) pass def gd2(x, grad, alpha, max_iter=10): xs = np.zeros((1 + max_iter, x.shape[0])) xs[0,:] = x for i in range(max_iter): x = x - alpha * grad(x) xs[i+1,:] = x return xs def gd2_momentum(x, grad, alpha, beta=0.9, max_iter=10): xs = np.zeros((1 + max_iter, x.shape[0])) xs[0, :] = x v = 0 for i in range(max_iter): v = beta*v + (1-beta)*grad(x) vc = v/(1+beta**(i+1)) x = x - alpha * vc xs[i+1, :] = x return xs ``` ### Gradient descent with large step size We get severe oscillations. ``` alpha = 0.01 x0 = np.array([-1,-1]) xs = gd2(x0, grad2, alpha, max_iter=75) x = np.linspace(-1.2, 1.2, 100) y = np.linspace(-1.2, 1.2, 100) X, Y = np.meshgrid(x, y) levels = [0.1,1,2,4,9, 16, 25, 36, 49, 64, 81, 100] Z = x**2 + 100*Y**2 c = plt.contour(X, Y, Z, levels) plt.plot(xs[:, 0], xs[:, 1], 'o-', c='red') plt.title('Vanilla gradient descent') pass ``` ### Gradient descent with momentum The damping effect is clear. ``` alpha = 0.01 x0 = np.array([-1,-1]) xs = gd2_momentum(x0, grad2, alpha, beta=0.9, max_iter=75) x = np.linspace(-1.2, 1.2, 100) y = np.linspace(-1.2, 1.2, 100) X, Y = np.meshgrid(x, y) levels = [0.1,1,2,4,9, 16, 25, 36, 49, 64, 81, 100] Z = x**2 + 100*Y**2 c = plt.contour(X, Y, Z, levels) plt.plot(xs[:, 0], xs[:, 1], 'o-', c='red') plt.title('Gradieent descent with momentum') pass ``` ### Gradient descent with RMSprop RMSprop scales the learning rate in each direction by the square root of the exponentially weighted sum of squared gradients. 
Near a saddle or any plateau, there are directions where the gradient is very small - RMSprop encourages larger steps in those directions, allowing faster escape.

```
def gd2_rmsprop(x, grad, alpha, beta=0.9, eps=1e-8, max_iter=10):
    xs = np.zeros((1 + max_iter, x.shape[0]))
    xs[0, :] = x
    v = 0
    for i in range(max_iter):
        v = beta*v + (1-beta)*grad(x)**2
        x = x - alpha * grad(x) / (eps + np.sqrt(v))
        xs[i+1, :] = x
    return xs

alpha = 0.1
x0 = np.array([-1,-1])
xs = gd2_rmsprop(x0, grad2, alpha, beta=0.9, max_iter=10)

x = np.linspace(-1.2, 1.2, 100)
y = np.linspace(-1.2, 1.2, 100)
X, Y = np.meshgrid(x, y)
levels = [0.1,1,2,4,9, 16, 25, 36, 49, 64, 81, 100]
Z = x**2 + 100*Y**2
c = plt.contour(X, Y, Z, levels)
plt.plot(xs[:, 0], xs[:, 1], 'o-', c='red')
plt.title('Gradient descent with RMSprop')
pass
```

### ADAM

ADAM (Adaptive Moment Estimation) combines the ideas of momentum, RMSprop and bias correction. It is probably the most popular gradient descent method in current deep learning practice.

```
def gd2_adam(x, grad, alpha, beta1=0.9, beta2=0.999, eps=1e-8, max_iter=10):
    xs = np.zeros((1 + max_iter, x.shape[0]))
    xs[0, :] = x
    m = 0
    v = 0
    for i in range(max_iter):
        m = beta1*m + (1-beta1)*grad(x)
        v = beta2*v + (1-beta2)*grad(x)**2
        # bias-corrected first and second moment estimates (scaling by 1/(1 - beta^t),
        # as in the EWA-with-bias-correction section above)
        mc = m/(1-beta1**(i+1))
        vc = v/(1-beta2**(i+1))
        x = x - alpha * mc / (eps + np.sqrt(vc))
        xs[i+1, :] = x
    return xs

alpha = 0.1
x0 = np.array([-1,-1])
xs = gd2_adam(x0, grad2, alpha, beta1=0.9, beta2=0.9, max_iter=10)

x = np.linspace(-1.2, 1.2, 100)
y = np.linspace(-1.2, 1.2, 100)
X, Y = np.meshgrid(x, y)
levels = [0.1,1,2,4,9, 16, 25, 36, 49, 64, 81, 100]
Z = x**2 + 100*Y**2
c = plt.contour(X, Y, Z, levels)
plt.plot(xs[:, 0], xs[:, 1], 'o-', c='red')
plt.title('Gradient descent with ADAM')
pass
```
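For reference, the bias-corrected update implemented by `gd2_adam` above (the standard ADAM rule) can be written as

$$
m_t = \beta_1 m_{t-1} + (1 - \beta_1)\nabla f(x_{t-1}), \qquad
v_t = \beta_2 v_{t-1} + (1 - \beta_2)\left(\nabla f(x_{t-1})\right)^2
$$

$$
\hat{m}_t = \frac{m_t}{1 - \beta_1^t}, \qquad
\hat{v}_t = \frac{v_t}{1 - \beta_2^t}, \qquad
x_t = x_{t-1} - \alpha\, \frac{\hat{m}_t}{\epsilon + \sqrt{\hat{v}_t}}
$$

where the square and square root are applied elementwise.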
github_jupyter
# The Stick and Ball Geometry The ``SpheresAndCylinders`` class contains an assortment of pore-scale models that generate geometrical information assuming the pores are spherical and throats are cylindrical. The ``SpheresAndCylinders`` is a perfect starting point for generating your own custom geometry. In fact, it's likely that only the calculation of 'pore.diameter' would need to be changed. By default the 'pore.diameter' values are drawn from a random distribution which is not very realistic. Luckily, it's easy to update the model used to calculate diameter, and then propagate this change to all the dependent values (i.e. 'pore.volume'), as illustrated below. ``` import openpnm as op %config InlineBackend.figure_formats = ['svg'] import matplotlib.pyplot as plt pn = op.network.Cubic(shape=[20, 20, 20], spacing=100) ``` > The spacing of the above network is in um for this example to make values easier to read, but in general you should always use SI Now we can create a geometry object based on the ``SpheresAndCylinders``: ``` geo = op.geometry.SpheresAndCylinders(network=pn, pores=pn.Ps, throats=pn.Ts) ``` As can be seen by printing it, there are quite a few geometrical properties already added to this object. Defining these manually would have been a pain, so it's a good idea to start with this class then alter the few models that need it: ``` print(geo) ``` The pore size distribution on the ``SpheresAndCylinders`` is probably the more likely thing to change, since it is a random (i.e. uniform distribution) as shown below: ``` fig = plt.hist(geo['pore.diameter'], bins=25, edgecolor='k') ``` The models on the ``geo`` object can be seen by printing them: ``` print(geo.models) ``` In this tutorial we will change how pore sizes are calculated. We can do this by assigning a new pore-scale model for 'pore.diameter'. Let's use Gaussian distribution: ``` f = op.models.geometry.pore_size.normal geo.add_model(propname='pore.diameter', model=f, loc=50, scale=10) ``` This model is automatically run when it's assigned, so we can inspect the new pore diameter values: ``` fig = plt.hist(geo['pore.diameter'], bins=25, edgecolor='k') ``` The above distribution does not look very much like a Gaussian distribution. This is because the 'pore.seed' values are truncated between 0.2 and 0.7: ``` print(geo.models['pore.seed']) ``` We should change this to a wider range to capture more pores on the "tails", then call ``regenerate_models``, which will not only regenerate the random numbers, but all the other properties that depend on it such as 'pore.diameter', 'pore.volume', and so on: ``` geo.models['pore.seed']['num_range'] = [0.001, 0.999] geo.regenerate_models() fig = plt.hist(geo['pore.diameter'], bins=25, edgecolor='k') ``` A detailed example of adjusting pore-size distributions is given [here](./adjusting_pore_size_distributions.ipynb)
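As a quick check that ``regenerate_models`` really propagated the new diameters to the dependent properties, one can compare 'pore.volume' against the volume of a sphere with the corresponding 'pore.diameter'. This is only a sanity-check sketch: it assumes the default volume model of ``SpheresAndCylinders`` treats pores as spheres, which the class name suggests but which may differ between OpenPNM versions.

```
import numpy as np

# If 'pore.volume' is a sphere volume computed from 'pore.diameter' (an assumption),
# this prints True after regenerate_models(); if the default model differs, it prints False.
d = geo['pore.diameter']
print(np.allclose(geo['pore.volume'], (4/3) * np.pi * (d/2)**3))
```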
github_jupyter
# Welcome to Exkaldi

In this section, we will train an n-gram language model and query it.

Although __Srilm__ is available in exkaldi, we recommend the __Kenlm__ toolkit.

```
import exkaldi

import os
dataDir = "librispeech_dummy"
```

First, prepare the lexicons. We have already generated and saved a __LexiconBank__ object to file (3_prepare_lexicons), so we simply restore it.

```
lexFile = os.path.join(dataDir, "exp", "lexicons.lex")

lexicons = exkaldi.load_lex(lexFile)

lexicons
```

We will use the training text corpus to train the LM. Although we have prepared a transcription file in the data directory, we do not need the utterance-ID at the head of each line, so a little work is needed to produce a new text file. The exkaldi __Transcription__ class can help here.

```
textFile = os.path.join(dataDir, "train", "text")

trans = exkaldi.load_transcription(textFile)

trans

newTextFile = os.path.join(dataDir, "exp", "train_lm_text")

trans.save(fileName=newTextFile, discardUttID=True)
```

Actually, you do not need to do this: if you pass a __Transcription__ object to the LM training function, the utterance-ID information is discarded automatically.

Now we train a 2-gram model with the __Kenlm__ backend. (A __srilm__ backend is also available.)

```
arpaFile = os.path.join(dataDir, "exp", "2-gram.arpa")

exkaldi.lm.train_ngrams_kenlm(lexicons, order=2, text=trans, outFile=arpaFile, config={"-S":"20%"})
```

The ARPA model can be transformed to binary format to speed up loading and reduce memory cost. Although the __KenLm__ Python API supports reading the ARPA format, exkaldi expects the KenLM binary format.

```
binaryLmFile = os.path.join(dataDir, "exp", "2-gram.binary")

exkaldi.lm.arpa_to_binary(arpaFile, binaryLmFile)
```

Use the binary LM file to initialize a Python KenLM n-grams object.

```
model = exkaldi.lm.KenNGrams(binaryLmFile)

model
```

__KenNGrams__ is a simple wrapper of the KenLM Python Model. Check the model information:

```
model.info
```

You can query this model with a sentence.

```
model.score_sentence("HELLO WORLD", bos=True, eos=True)
```

Here is an example of computing the perplexity of a test corpus in order to evaluate the language model.

```
evalTrans = exkaldi.load_transcription( os.path.join(dataDir, "test", "text") )

score = model.perplexity(evalTrans)

score

type(score)
```

___score___ is an exkaldi __Metric__ object (a subclass of the Python dict). We designed a group of classes to hold Kaldi text-format tables and exkaldi's own text-format data:

__ListTable__: spk2utt, utt2spk, words, phones and so on.

__Transcription__: transcription corpora, n-best decoding results and so on.

__Metric__: AM scores, LM scores, LM perplexity, sentence lengths and so on.

__ArkIndexTable__: the index of binary data.

__WavSegment__: the wave information.

All these classes are subclasses of the Python dict. They share some common methods and attributes and also have their own. For example, we can compute the average value of a __Metric__.

```
score.mean()
```

More precisely, the average weighted by the length of the sentences.

```
score.mean( weight= evalTrans.sentence_length() )
```

Back to the language model: if you want to query an ARPA model directly, you can use this function.

```
model = exkaldi.load_ngrams(arpaFile)

model.info
```

To conclude this section, we generate the grammar FST for further steps.

```
Gfile = os.path.join(dataDir, "exp", "G.fst")

exkaldi.decode.graph.make_G(lexicons, arpaFile, outFile=Gfile, order=2)
```
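As a closing note on the numbers above: an n-gram model's perplexity is just an exponentiated average negative log-probability per word. The plain-Python sketch below illustrates the arithmetic with made-up per-utterance log10 probabilities and word counts (the exact conventions used inside exkaldi's __Metric__ may differ).

```
# Made-up per-utterance log10 probabilities and word counts, for illustration only
log10_probs = [-21.4, -35.9, -12.7]
word_counts = [7, 12, 5]

# Corpus perplexity = 10 ** ( - total log10 probability / total word count )
ppl = 10 ** (-sum(log10_probs) / sum(word_counts))
print(ppl)
```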
github_jupyter
<h1 style="text-align:center">Chapter 2</h1> --- ###### Words --- Take a look at this sentence : 'The quick brown fox jumps over the lazy fox, and took his meal.' * The sentence has 13 _Words_ if you don't count punctuations, and 15 if you count punctions. * To count punctuation as a word or not depends on the task in hand. * For some tasks like P-O-S tagging & speech synthesis, punctuations are treated as words. (Hello! and Hello? are different in speech synthesis) ``` len('The quick brown fox jumps over the lazy fox, and took his meal.'.split()) ``` ##### Utterance > An utterance is a spoken correlate of a sentence. (Speaking a sentence is utterance) Take a look at this sentence: 'I am goi- going to the market to buy ummm fruits.' * This utterance has two kinds of <strong>disfluencies</strong>(disorder in smooth flow). 1. Fragment - The broken off word 'goi' is a fragment. 2. Fillers - Words like ummm, uhhh, are called fillers or filled pauses. ##### Lemma > A lemma is a set of lexical forms having the same stem, the same major part-of-speech, and the same word sense. * Wordform is the full inflected or derived form of the word. Example, Wordforms - cats,cat Lemma - cat Wordforms - Moving, move Lemma - move ##### Vocabulary, Wordtypes, and Wordtokens * Vocabulary - It is the set of distinct words in a corpus. * Wordtypes - It is the size of the vocabulary V i.e. |V| * Wordtokens - It is the total number of running words. Take a look at this sentence: 'They picnicked by the pool, then lay back on the grass and looked at the stars.' Here, * Vocabulary = V = {They, picnicked, by, the, pool, then, lay, back, on, grass, and, looked, at, stars} * Wordtypes = |V| = 14 * Wordtokens(ignoring punctuation) = 16 ``` def vocTypeToken(sentence): tokens = sentence.split() vocabulary = list(set(tokens)) wordtypes = len(vocabulary) wordtokens = len(tokens) print("Sentence = {}\n".format(sentence)) print("Tokens = {}\n".format(tokens)) print("Vocabulary = {}\n".format(sorted(vocabulary))) print("Wordtypes = {}\n".format(wordtypes)) print("Wordtokens = {}".format(wordtokens)) sentence = 'They picnicked by the pool, then lay back on the grass and looked at the stars.' vocTypeToken(sentence) ``` ###### Herdan's Law > The larger the corpora we look at, the more wordtypes we find. The relationsip between wordtypes and tokens is called <strong>Herdan's Law</strong> \begin{equation*} |V| = kN^\beta \end{equation*} , k and \\(\beta\\) are positive consonants. The value of \\(\beta\\) depends on the corpus size and is in the range of 0 to 1. * We can say that the vocabulary size for a text goes up significantly faster than the square root of its length in words. --- - Another rough measure of number of words in a corpus is the number of lemmas. ##### Code switching > The phenonmenon of changing lanugage while reading or writing is called code switching. Example, 'Tu mera dost hai or rahega, don't worry.' --- ## Text Normalization --- Before any type of natural language processing, the text has to be brought a normal condition or state. The below mentioned three tasks are common for almost every normalization process. 1. Tokenizing ( breaking into words ) 2. Normalizing word formats 3. Segmenting sentences ### Word tokenization --- > The task of segmenting text into words. <p style="color:red">Why you should not use split() for tokenizaiton.</p> If using split() on the text, the words like 'Mr. 
Randolf', emails like '[email protected]' may be broken down as ['Mr.','Randolf'], emails may be broken down as ['hello','@','internet','.','com']. This is not what we generally want, hence special tokenization algorithms must be used. * Commas are generally used as word boundaries but also in large numbers (540,000). * Periods are generally used as sentence boundaries but also in emails, urls, salutation. ##### Clitic > Clitics are words that can't stand on their own. They are attached to other words. Tokenizer can be used to expand clitics. Example of clitics, What're, Who's, You're. - Tokenization algorithms can also be used to tokenize multiwords like 'New York', 'rock N roll'. This tokenization is used in conjunction with <strong>Named Entity Detection</strong> (the task of detecting name, places, dates, organizations) Python code for tokenization below ``` from nltk.tokenize import word_tokenize text = 'The San Francisco-based restaurant," they said, "doesn’t charge $10".' print(word_tokenize(text)) from nltk.tokenize import wordpunct_tokenize print(wordpunct_tokenize(text)) ``` Since tokenization needs to run before any language processing, it needs to be fast. Regex based tokenization is fast but not that smart while handling punctuations, and language dilemma. There are many tokenization algorithms like ByteLevelBPETokenizer, CharBPETokenizer, SentencePieceBPETokenizer. Below excercise shows step by step guide to modern way of tokenization using [huggingface's](https://huggingface.co/) ultrafast tokenization library - [Tokenizer](https://github.com/huggingface/tokenizers) --- #### Notice the speed of huggingface tokenizer and nltk tokenizer ``` !python3 -m pip install tokenizers #install tokenizer from tokenizers import (BertWordPieceTokenizer) tokenizer = BertWordPieceTokenizer("bert-base-uncased-vocab.txt", lowercase=True) from datetime import datetime def textTokenizer(text): start = (datetime.now()) print(tokenizer.encode(text).tokens) end = (datetime.now()) print("Time taken - {}".format((end-start).total_seconds())) textTokenizer('Number expressions introduce other complications as well; while commas nor- mally appear at word boundaries, commas are used inside numbers in English, every three digits.') ``` * We will discuss about [CLS] and [SEP] later ``` from datetime import datetime def nltkTokenizer(text): start = (datetime.now()) print(word_tokenize(text)) end = (datetime.now()) print("Time taken - {}".format((end-start).total_seconds())) nltkTokenizer('Number expressions introduce other complications as well; while commas nor- mally appear at word boundaries, commas are used inside numbers in English, every three digits.') ``` ##### Word segmentation > Some languages(like Chinese) don't have words seperated by spaces, hence tokenization is not easily done. So word segmentation is done using sequence models trained on hand seperated datasets.
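For languages written without spaces, a classic (if crude) baseline is greedy maximum matching against a dictionary, before resorting to the sequence-model approach mentioned above. The sketch below uses a tiny made-up dictionary purely for illustration.

```
# Greedy maximum-matching (MaxMatch) word segmentation -- a simple dictionary baseline
def max_match(text, dictionary, max_len=4):
    words = []
    i = 0
    while i < len(text):
        # try the longest dictionary entry that starts at position i
        for j in range(min(len(text), i + max_len), i, -1):
            if text[i:j] in dictionary or j == i + 1:   # fall back to a single character
                words.append(text[i:j])
                i = j
                break
    return words

tiny_dictionary = {"他", "来到", "北京", "大学", "北京大学"}
print(max_match("他来到北京大学", tiny_dictionary))   # ['他', '来到', '北京大学']
```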
github_jupyter
``` import torch import torch.nn.functional as F import torchsde from torchvision import datasets, transforms import math import numpy as np import pandas as pd from tqdm import tqdm from torchvision.transforms import ToTensor from torch.utils.data import DataLoader import functorch import matplotlib.pyplot as plt import cfollmer.functional as functional from cfollmer.objectives import relative_entropy_control_cost from cfollmer.drifts import SimpleForwardNet, SimpleForwardNetBN, ResNetScoreNetwork from cfollmer.sampler_utils import FollmerSDE device = "cuda" if torch.cuda.is_available() else "cpu" class DNN(torch.nn.Module): def __init__(self, input_dim=1, output_dim=1): super(DNN, self).__init__() self.output_dim = output_dim self.input_dim = input_dim self.nn = torch.nn.Sequential( torch.nn.Linear(input_dim, 100), torch.nn.ReLU(), torch.nn.Linear(100, 100), torch.nn.ReLU(), torch.nn.Linear(100, output_dim) ) def forward(self, x): return self.nn(x) class LinModel(torch.nn.Module): def __init__(self, input_dim=1, output_dim=1): super(LinModel, self).__init__() self.output_dim = output_dim self.input_dim = input_dim self.nn = torch.nn.Sequential( torch.nn.Linear(input_dim, 1), ) def forward(self, x): return self.nn(x) device = "cuda" if torch.cuda.is_available() else "cpu" def lin_reg_data_gen(dim, sigma_n, device, num_samps=30): w = np.ones((dim,1)) b = 1 func = lambda x: np.dot(x, w) + 1 # Test inputs num_test_samples = 30 if dim == 1: X_test = np.linspace(-16, 16, num_samps).reshape(num_samps,1) X_train = np.linspace(-3.5, 3.5, num_samps).reshape(-1,1) else: X_test = np.random.randn(num_samps, dim) X_train = np.random.randn(num_samps, dim) # Noise free training inputs #f_train = np.cos(X_train) f_train = func(X_train) # Noise-free training outputs #f = np.cos(X_test) f = func(X_test) y_test = f # Noisy training Inputs with additive Gaussian noise (zero-mean, variance sigma_n) mu = np.zeros(X_train.size) epsilon = np.random.multivariate_normal(mu, sigma_n**2 * np.eye(X_train.size)) # Noisy targets y_train = f_train + epsilon.reshape(X_train.size,1) return X_train, y_train, X_test, y_test, f dim = dim_data = 1 sigma_n = 0.5 X_train, y_train, X_test, y_test, f = lin_reg_data_gen(dim, sigma_n, device) N_train , _ = X_train.shape N_test , _ = X_test.shape # if dim == 1: # Noisy observations fig, ax = plt.subplots(figsize=(6, 4), tight_layout=True) ax.plot(X_test[:,[0]], f, 'b', label = 'f(x)') ax.plot(X_train[:,[0]], y_train,".", label = 'y(x) = f(x) + $\epsilon$') ax.legend(loc = 'upper left') ax.set_title('Target function with noisy observations ') plt.show() X_train = torch.tensor(X_train, device=device, dtype=torch.float) X_test = torch.tensor(X_test, device=device, dtype=torch.float) y_train = torch.tensor(y_train, device=device, dtype=torch.float) y_test = torch.tensor(y_test, device=device, dtype=torch.float) # dim model = LinModel().to(device) func_model, params = functorch.make_functional(model) size_list = functional.params_to_size_tuples(params) dim = functional.get_number_of_params(size_list) sigma2 = 1 def log_prior(params): return -torch.sum(params**2) / (2 * sigma2) def log_likelihood(x, y, params): preds = func_model(functional.get_params_from_array(params, size_list), x) diff = preds - y return - torch.sum(diff**2) / (2 * sigma_n**2) def log_likelihood_batch(x, y, params_batch): func = lambda params: log_likelihood(x, y, params) func = functorch.vmap(func) return func(params_batch) def log_posterior(x, y, params): return log_prior(params) + (N_train / x.shape[0]) * 
log_likelihood(x, y, params) def log_posterior_batch(x, y, params_batch): func = lambda params: log_posterior(x, y, params) func = functorch.vmap(func) return func(params_batch) gamma = 0.1**2 n_steps = 300 data_batch_size = 50 param_batch_size = 32 def train(gamma, n_steps, data_batch_size, param_batch_size, dt=0.05, stl=False): sde = FollmerSDE(gamma, SimpleForwardNetBN(input_dim=dim, width=300)).to(device) optimizer = torch.optim.Adam(sde.parameters(), lr=1e-4) losses = [] for _ in tqdm(range(n_steps)): perm = torch.randperm(N_train) x = X_train[perm[:data_batch_size], :] y = y_train[perm[:data_batch_size], :] optimizer.zero_grad() partial_log_p = lambda params_batch: log_posterior_batch(x, y, params_batch) loss = relative_entropy_control_cost(sde, partial_log_p, param_batch_size=param_batch_size, dt=dt, device=device) loss.backward() losses.append(loss.detach().cpu().numpy()) optimizer.step() if stl: # double check theres no references left sde.drift_network_detatched.load_state_dict((sde.drift_network.state_dict())) losses = np.array(losses) return sde, losses def predict(param_samples, x, y): with torch.no_grad(): predict_func = lambda params : func_model(functional.get_params_from_array(params, size_list), x) predict_func = functorch.vmap(predict_func) preds = predict_func(param_samples) std, mean = torch.std_mean(preds, dim=0) mse = torch.mean((y_test - mean)**2) logp = torch.mean(log_likelihood_batch(x, y, param_samples)) return std, mean, logp, mse def plot_fit(mean, std, title="", fn=None): x = X_test.cpu().squeeze() std = std.cpu().squeeze() mean = mean.cpu().squeeze() plt.plot(x, mean) plt.fill_between(x, mean - 2 * std, mean + 2 * std, alpha=0.2) plt.plot(X_train.cpu(), y_train.cpu(), 'kP', ms = 9) plt.title(title) plt.legend(["mean prediction", "data", r"$\pm 2\sigma^2$"], loc="upper left") if fn is not None: plt.savefig(fn, bbox_inches="tight", dpi=600) plt.close() sde, losses = train(gamma, n_steps, data_batch_size, param_batch_size) plt.plot(losses) param_samples = sde.sample(100, dt=0.01, device=device) std, mean, logp, mse = predict(param_samples, X_test, y_test) std = torch.sqrt(std**2 + sigma_n**2) plot_fit(mean, std, title="SBP fit", fn=None) plt.show() plot_fit(mean, std, title="SBP fit", fn="plots/step_func/sbp_fit.png") # plt.show() param_samples.std(dim=0), sfs_samps.std(dim=0) class MCFollmerDrift: def __init__(self, log_posterior, X,y, dim, device, n_samp=300, gamma=torch.tensor(1), debug=False): self.log_posterior = log_posterior self.debug = debug self.log_posterior = log_posterior self.device = device self.X = X self.dim = dim self.y = y self.gamma = gamma self.n_samp = n_samp self.distrib = torch.distributions.multivariate_normal.MultivariateNormal( loc=torch.zeros(dim), covariance_matrix=torch.eye(dim) * torch.sqrt(gamma) ) def g(self, thet): func = lambda params: self.log_posterior(self.X, self.y, params) func = functorch.vmap(func) lp = func(thet) reg = 0.5 * (thet**2).sum(dim=-1) / self.gamma # if torch.any(torch.isinf(torch.exp(lp + reg))): out = torch.exp(lp + reg) isnan = torch.isinf(torch.abs(out)) | torch.isnan(out) if self.debug and torch.any(isnan): import pdb; pdb.set_trace() # import pdb; pdb.set_trace() return out # nans exp(reg) def ln_g(self, thet): func = lambda params: self.log_posterior(self.X, self.y, params) func = functorch.vmap(func) lp = func(thet) reg = 0.5 * (thet**2).sum(dim=-1) / self.gamma out = lp + reg isnan = torch.isinf(torch.abs(out)) | torch.isnan(out) if self.debug and torch.any(isnan): import pdb; pdb.set_trace() return 
out # nans exp(reg) def mc_follmer_drift_(self, t, params, Z): # Using Stein Estimator for SFS drift g_YZt = self.g(params[None, ...] + torch.sqrt(1-t) * Z) num = (Z * g_YZt[..., None]).mean(dim=0) denom = torch.sqrt(1-t) * (g_YZt).mean(dim=0) out = num / denom[...,None] isnan = torch.isinf(torch.abs(out)) | torch.isnan(out) return out def mc_follmer_drift_stable(self, t, params, Z): # Using Stein Estimator for SFS drift N, d = Z.shape lnN = torch.log(torch.tensor(N)).to(self.device) ln_g_YZt = self.ln_g(params[None, ...] + torch.sqrt(1-t) * Z) Z_plus = torch.nn.functional.relu(Z) Z_minus = torch.nn.functional.relu(-Z) ln_num_plus = torch.logsumexp( (torch.log(Z_plus) + ln_g_YZt[..., None]) - lnN, dim=0, ) ln_num_minus = torch.logsumexp( (torch.log(Z_minus) + ln_g_YZt[..., None]) - lnN, dim=0 ) ln_denom = torch.logsumexp( torch.log(torch.sqrt(1-t)) + (ln_g_YZt) - lnN, dim=0 ) out = torch.exp(ln_num_plus-ln_denom) - torch.exp(ln_num_minus-ln_denom) isnan = torch.isinf(torch.abs(out)) | torch.isnan(out) return out def mc_follmer_drift_debug(self, t, params): # Using Stein Estimator for SFS drift Z = self.distrib.rsample((self.n_samp,)).to(self.device) params = params[0] g_YZt = self.g(params[None, ...] + torch.sqrt(1-t) * Z) num = (Z * g_YZt[..., None]).mean(dim=0) denom = torch.sqrt(1-t) * (g_YZt).mean(dim=0) out = num / denom[...,None] isnan = torch.isinf(torch.abs(out)) | torch.isnan(out) if self.debug and torch.any(isnan): import pdb; pdb.set_trace() return out.reshape(1,-1) def mc_follmer_drift(self, t , params_batch): Z = self.distrib.rsample((params_batch.shape[0], self.n_samp)).to(self.device) func = lambda params, z: self.mc_follmer_drift_stable(t, params, z) func = functorch.vmap(func, in_dims=(0,0) ) out = func(params_batch, Z) # import pdb; pdb.set_trace() return out class MCFollmerSDE(torch.nn.Module): def __init__(self, gamma, dim, log_posterior, X_train, y_train, device, debug=False): super().__init__() self.noise_type = 'diagonal' self.sde_type = 'ito' self.gamma = gamma if debug: self.drift = MCFollmerDrift(log_posterior, X_train, y_train, dim, device, gamma=gamma, debug=debug).mc_follmer_drift_debug else: self.drift = MCFollmerDrift(log_posterior, X_train, y_train, dim, device, gamma=gamma).mc_follmer_drift self.dim = dim def f(self, t, y, detach=False): return self.drift(t, y) def g(self, t, y): return torch.sqrt(self.gamma )* torch.ones_like(y) def sample_trajectory(self, batch_size, dt=0.05, device=None): param_init = torch.zeros((batch_size, self.dim), device=device) n_steps = int(1.0 / dt) ts = torch.linspace(0, 1, n_steps, device=device) param_trajectory = torchsde.sdeint(self, param_init, ts, method="euler", dt=dt) return param_trajectory, ts def sample(self, batch_size, dt=0.05, device=None): return self.sample_trajectory(batch_size, dt=dt, device=device)[0] [-1]#[-1] # mcfol = MCFollmerDrift(log_posterior, X_train, y_train, dim, device) sde_sfs = MCFollmerSDE(torch.tensor(gamma), dim, log_posterior, X_train, y_train, device) # sfs_samps sfs_samps = sde_sfs.sample(130, dt=0.01, device=device) sfs_samps (~torch.isnan(sfs_samps).sum(dim=1).bool()).sum() sfs_samps = sfs_samps[~torch.isnan(sfs_samps).sum(dim=1).bool()] std_sfs, mean_sfs, logp_sfs, mse_sfs = predict(sfs_samps, X_test, y_test) std_sfs = torch.sqrt(std_sfs**2 + sigma_n**2) plot_fit(mean_sfs, std_sfs, title="SBP fit", fn=None) # plot_fit(mean, std, title="SBP fit", fn=None) # plot_fit(torch.tensor(mean_true), torch.tensor(std_true), title="Exact fit", fn=None) plt.show() plot_fit(mean_sfs, std_sfs, 
title="SBP fit", fn="plots/step_func/sbp_sfs_mc_fit.png") def pred_true_std(X_train, X_test, sigma_n, sigma2, dim): # https://github.com/probml/pml-book/releases/latest/download/book1.pdf # See Eq 11.124 in the above link page 430 on pdf viewer (page 400 on page number in pdf) X_trainnp = X_train.cpu().detach().numpy() n_, d = X_trainnp.shape X_trainnp = np.concatenate((X_trainnp, np.ones((n_, 1))), axis=1) X_testnp = X_test.cpu().detach().numpy() n_, d = X_testnp.shape X_testnp = np.concatenate((X_testnp, np.ones((n_, 1))), axis=1) print(X_trainnp.shape) Sigma_post = sigma_n**2 * np.linalg.inv(sigma_n**2 * np.eye(dim) / sigma2 + np.dot(X_trainnp.T,X_trainnp)) sigma_pred = [] for i in range(n_): sigma_pred += [np.dot(X_testnp[i,:].dot(Sigma_post), X_testnp[i,:]) + sigma_n**2 ] std_true = np.sqrt(sigma_pred) return std_true std_true = pred_true_std(X_train, X_test, sigma_n, sigma2, dim) def pred_true_mean(y_train, X_train, X_test, sigma_n, sigma2, dim): # https://github.com/probml/pml-book/releases/latest/download/book1.pdf # See Eq 11.124 in the above link page 430 on pdf viewer (page 400 on page number in pdf) X_trainnp = X_train.cpu().detach().numpy() n_, d = X_trainnp.shape lambda_ = sigma_n**2 / sigma2 X_trainnp = np.concatenate((X_trainnp, np.ones((n_, 1))), axis=1) X_testnp = X_test.cpu().detach().numpy() n_, d = X_testnp.shape X_testnp = np.concatenate((X_testnp, np.ones((n_, 1))), axis=1) Xty = np.dot(X_trainnp.T, y_train) Sigma_post = np.linalg.inv(sigma_n**2 * np.eye(dim) / sigma2 + np.dot(X_trainnp.T,X_trainnp)) w = np.dot(Sigma_post, Xty) print(w.shape) return np.dot(X_testnp,w) mean_true = pred_true_mean(y_train.detach().cpu(), X_train, X_test, sigma_n, sigma2, dim) mean_true.shape param_samples = sde.sample(100, dt=0.01, device=device) plot_fit(torch.tensor(mean_true), torch.tensor(std_true), title="Exact fit", fn=None) plt.show() # plot_fit(mean, std, title="SBP fit", fn="plots/step_func/sbp_fit.png") np.abs(std_sfs.detach().cpu().numpy()- std_true).mean(), np.abs(std.detach().cpu().numpy()- std_true).mean() np.abs(mean_sfs.detach().cpu().numpy()- mean_true).mean(), np.abs(mean.detach().cpu().numpy()- mean_true).mean() std_sfs = torch.sqrt(std_sfs**2 + sigma_n**2) plot_fit(mean_sfs, std_sfs, title="SBP fit", fn=None) plot_fit(mean, std, title="SBP fit", fn=None) plot_fit(torch.tensor(mean_true), torch.tensor(std_true), title="Exact fit", fn=None) plt.show() plot_fit(mean_sfs, std_sfs, title="SBP fit", fn="plots/step_func/sbp_sfs_mc_fit.png") # plot_fit(mean_sfs, std_sfs, title="SBP fit", fn=None) plot_fit(mean, std_sfs, title="SBP fit", fn=None) plot_fit(torch.tensor(mean_true), torch.tensor(std_true), title="Exact fit", fn=None) n_runs = 5 sbp_mse = [] sbp_logp = [] for i in range(n_runs): sde, losses = train(gamma, n_steps, data_batch_size, param_batch_size) with torch.no_grad(): param_samples = sde.sample(100, dt=0.01, device=device) std, mean, logp, mse = predict(param_samples, X_test, y_test) std = torch.sqrt(std**2 + sigma_n**2) plot_fit(mean, std, title="SBP fit", fn=None) plt.show() plot_fit(mean, std, title="SBP fit #{}".format(i+1), fn="plots/step_func/sbp_fit_#{:d}.png".format(i+1)) sbp_mse.append(mse.cpu().numpy()) sbp_logp.append(logp.cpu().numpy()) sbp_mse = np.array(sbp_mse) sbp_logp = np.array(sbp_logp) @torch.enable_grad() def gradient(x, y, params): params_ = params.clone().requires_grad_(True) loss = log_posterior(x, y, params_) grad, = torch.autograd.grad(loss, params_) return loss.detach().cpu().numpy(), grad def step_size(n): return 1e-4/ (1 + n)**0.1 
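# Note on the sampler defined next: sgld() below runs Stochastic Gradient Langevin Dynamics,
#     theta <- theta + (eps_n / 2) * grad log p(theta | minibatch) + Gaussian noise with variance eps_n,
# where eps_n is the slowly decaying step size from step_size() above, and the last `last_n`
# iterates are kept as (approximate) posterior samples.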
def sgld(n_steps, last_n, data_batch_size): losses = [] param_samples = [] params = torch.zeros(dim).float().to(device) for step in tqdm(range(n_steps)): perm = torch.randperm(N_train) x = X_train[perm[:data_batch_size], :] y = y_train[perm[:data_batch_size], :] eps = step_size(step) loss, grad = gradient(x, y, params) params = params + 0.5 * eps * grad + np.sqrt(eps) * torch.randn_like(params) if n_steps <= step + last_n: param_samples.append(params) losses.append(loss) param_samples = torch.stack(param_samples) return param_samples, losses param_samples, losses = sgld(10000, 2000, data_batch_size) plt.plot(losses) std, mean, logp, mse = predict(param_samples[:100], X_test, y_test) std = torch.sqrt(std**2 + sigma_n**2) plot_fit(mean, std, title="SBP fit", fn=None) plt.show() plot_fit(mean, std, title="SGLD fit", fn="plots/step_func/sgld_fit.png") n_runs = 5 n_steps = 10000 sgld_mse = [] sgld_logp = [] for i in range(n_runs): param_samples, losses = sgld(n_steps, 1000, data_batch_size) std, mean, logp, mse = predict(param_samples[:100], X_test, y_test) plot_fit(mean, std, title="SGLD fit #{} (100 samples)".format(i+1), fn="plots/step_func/sgld_fit_#{:d}_100.png".format(i+1)) std, mean, _, _ = predict(param_samples[:500], X_test, y_test) plot_fit(mean, std, title="SGLD fit #{} (500 samples)".format(i+1), fn="plots/step_func/sgld_fit_#{:d}_500.png".format(i+1)) std, mean, _, _ = predict(param_samples, X_test, y_test) plot_fit(mean, std, title="SGLD fit #{} (1000 samples)".format(i+1), fn="plots/step_func/sgld_fit_#{:d}_1000.png".format(i+1)) sgld_mse.append(mse.cpu().numpy()) sgld_logp.append(logp.cpu().numpy()) sgld_mse = np.array(sgld_mse) sgld_logp = np.array(sgld_logp) SBP_df = pd.DataFrame({"mse": sbp_mse, "logp": sbp_logp}) SGLD_df = pd.DataFrame({"mse": sgld_mse, "logp": sgld_logp}) SBP_df SBP_df.describe() SGLD_df SGLD_df.describe() ```
github_jupyter
# Assignment 2 - Q-Learning and Expected Sarsa Welcome to Course 2 Programming Assignment 2. In this notebook, you will: - Implement Q-Learning with $\epsilon$-greedy action selection - Implement Expected Sarsa with $\epsilon$-greedy action selection - Investigate how these two algorithms behave on Cliff World (described on page 132 of the textbook) We will provide you with the environment and infrastructure to run an experiment (called the experiment program in RL-Glue). This notebook will provide all the code you need to run your experiment and visualise learning performance. This assignment will be graded automatically by comparing the behavior of your agent to our implementations of Expected Sarsa and Q-learning. The random seed will be set to avoid different behavior due to randomness. **You should not call any random functions in this notebook.** It will affect the agent's random state and change the results. ## Packages You will need the following libraries for this assignment. We are using: 1. numpy: the fundamental package for scientific computing with Python. 2. scipy: a Python library for scientific and technical computing. 3. matplotlib: library for plotting graphs in Python. 4. RL-Glue: library for reinforcement learning experiments. **Please do not import other libraries** — this will break the autograder. ``` %matplotlib inline import numpy as np from scipy.stats import sem import matplotlib.pyplot as plt from rl_glue import RLGlue import agent import cliffworld_env from tqdm import tqdm import pickle plt.rcParams.update({'font.size': 15}) plt.rcParams.update({'figure.figsize': [10,5]}) ``` ## Section 1: Q-Learning In this section you will implement and test a Q-Learning agent with $\epsilon$-greedy action selection (Section 6.5 in the textbook). ### Implementation Your job is to implement the updates in the methods agent_step and agent_end. We provide detailed comments in each method describing what your code should do. ``` # [Graded] # Q-Learning agent here class QLearningAgent(agent.BaseAgent): def agent_init(self, agent_init_info): """Setup for the agent called when the experiment first starts. Args: agent_init_info (dict), the parameters used to initialize the agent. The dictionary contains: { num_states (int): The number of states, num_actions (int): The number of actions, epsilon (float): The epsilon parameter for exploration, step_size (float): The step-size, discount (float): The discount factor, } """ # Store the parameters provided in agent_init_info. self.num_actions = agent_init_info["num_actions"] self.num_states = agent_init_info["num_states"] self.epsilon = agent_init_info["epsilon"] self.step_size = agent_init_info["step_size"] self.discount = agent_init_info["discount"] self.rand_generator = np.random.RandomState(agent_info["seed"]) # Create an array for action-value estimates and initialize it to zero. self.q = np.zeros((self.num_states, self.num_actions)) # The array of action-value estimates. def agent_start(self, state): """The first method called when the episode starts, called after the environment starts. Args: state (int): the state from the environment's evn_start function. Returns: action (int): the first action the agent takes. """ # Choose action using epsilon greedy. 
current_q = self.q[state,:] if self.rand_generator.rand() < self.epsilon: action = self.rand_generator.randint(self.num_actions) else: action = self.argmax(current_q) self.prev_state = state self.prev_action = action return action def agent_step(self, reward, state): """A step taken by the agent. Args: reward (float): the reward received for taking the last action taken state (int): the state from the environment's step based on where the agent ended up after the last step. Returns: action (int): the action the agent is taking. """ # Choose action using epsilon greedy. current_q = self.q[state, :] if self.rand_generator.rand() < self.epsilon: action = self.rand_generator.randint(self.num_actions) else: action = self.argmax(current_q) # Perform an update (1 line) ### START CODE HERE ### self.q[self.prev_state,self.prev_action] = self.q[self.prev_state,self.prev_action] + self.step_size*( reward + self.discount*np.max(self.q[state,:]) - self.q[self.prev_state,self.prev_action] ) ### END CODE HERE ### self.prev_state = state self.prev_action = action return action def agent_end(self, reward): """Run when the agent terminates. Args: reward (float): the reward the agent received for entering the terminal state. """ # Perform the last update in the episode (1 line) ### START CODE HERE ### self.q[self.prev_state,self.prev_action] = self.q[self.prev_state,self.prev_action] + self.step_size*( reward - self.q[self.prev_state,self.prev_action] ) ### END CODE HERE ### def argmax(self, q_values): """argmax with random tie-breaking Args: q_values (Numpy array): the array of action-values Returns: action (int): an action with the highest value """ top = float("-inf") ties = [] for i in range(len(q_values)): if q_values[i] > top: top = q_values[i] ties = [] if q_values[i] == top: ties.append(i) return self.rand_generator.choice(ties) ``` ### Test Run the cells below to test the implemented methods. The output of each cell should match the expected output. Note that passing this test does not guarantee correct behavior on the Cliff World. ``` # Do not modify this cell! ## Test Code for agent_start() ## agent_info = {"num_actions": 4, "num_states": 3, "epsilon": 0.1, "step_size": 0.1, "discount": 1.0, "seed": 0} current_agent = QLearningAgent() current_agent.agent_init(agent_info) action = current_agent.agent_start(0) print("Action Value Estimates: \n", current_agent.q) print("Action:", action) ``` **Expected Output:** ``` Action Value Estimates: [[0. 0. 0. 0.] [0. 0. 0. 0.] [0. 0. 0. 0.]] Action: 1 ``` ``` # Do not modify this cell! ## Test Code for agent_step() ## actions = [] agent_info = {"num_actions": 4, "num_states": 3, "epsilon": 0.1, "step_size": 0.1, "discount": 1.0, "seed": 0} current_agent = QLearningAgent() current_agent.agent_init(agent_info) actions.append(current_agent.agent_start(0)) actions.append(current_agent.agent_step(2, 1)) actions.append(current_agent.agent_step(0, 0)) print("Action Value Estimates: \n", current_agent.q) print("Actions:", actions) ``` **Expected Output:** ``` Action Value Estimates: [[ 0. 0.2 0. 0. ] [ 0. 0. 0. 0.02] [ 0. 0. 0. 0. ]] Actions: [1, 3, 1] ``` ``` # Do not modify this cell! 
## Test Code for agent_end() ## actions = [] agent_info = {"num_actions": 4, "num_states": 3, "epsilon": 0.1, "step_size": 0.1, "discount": 1.0, "seed": 0} current_agent = QLearningAgent() current_agent.agent_init(agent_info) actions.append(current_agent.agent_start(0)) actions.append(current_agent.agent_step(2, 1)) current_agent.agent_end(1) print("Action Value Estimates: \n", current_agent.q) print("Actions:", actions) ``` **Expected Output:** ``` Action Value Estimates: [[0. 0.2 0. 0. ] [0. 0. 0. 0.1] [0. 0. 0. 0. ]] Actions: [1, 3] ``` ## Section 2: Expected Sarsa In this section you will implement an Expected Sarsa agent with $\epsilon$-greedy action selection (Section 6.6 in the textbook). ### Implementation Your job is to implement the updates in the methods agent_step and agent_end. We provide detailed comments in each method describing what your code should do. ``` # [Graded] # Expected Sarsa agent here class ExpectedSarsaAgent(agent.BaseAgent): def agent_init(self, agent_init_info): """Setup for the agent called when the experiment first starts. Args: agent_init_info (dict), the parameters used to initialize the agent. The dictionary contains: { num_states (int): The number of states, num_actions (int): The number of actions, epsilon (float): The epsilon parameter for exploration, step_size (float): The step-size, discount (float): The discount factor, } """ # Store the parameters provided in agent_init_info. self.num_actions = agent_init_info["num_actions"] self.num_states = agent_init_info["num_states"] self.epsilon = agent_init_info["epsilon"] self.step_size = agent_init_info["step_size"] self.discount = agent_init_info["discount"] self.rand_generator = np.random.RandomState(agent_info["seed"]) # Create an array for action-value estimates and initialize it to zero. self.q = np.zeros((self.num_states, self.num_actions)) # The array of action-value estimates. def agent_start(self, state): """The first method called when the episode starts, called after the environment starts. Args: state (int): the state from the environment's evn_start function. Returns: action (int): the first action the agent takes. """ # Choose action using epsilon greedy. current_q = self.q[state, :] if self.rand_generator.rand() < self.epsilon: action = self.rand_generator.randint(self.num_actions) else: action = self.argmax(current_q) self.prev_state = state self.prev_action = action return action def agent_step(self, reward, state): """A step taken by the agent. Args: reward (float): the reward received for taking the last action taken state (int): the state from the environment's step based on where the agent ended up after the last step. Returns: action (int): the action the agent is taking. """ # Choose action using epsilon greedy. 
current_q = self.q[state,:] if self.rand_generator.rand() < self.epsilon: action = self.rand_generator.randint(self.num_actions) else: action = self.argmax(current_q) """ pi(any action) = epsilon / num_actions # any action might be chosen in the non-greedy case pi(greedy action) = pi(any action) + (1 - epsilon) / num_greedy_actions """ # Perform an update (~5 lines) ### START CODE HERE ### max_q = np.max(current_q) num_greedy_actions = np.sum(current_q==max_q) non_greedy_actions_prob = (self.epsilon / self.num_actions) greedy_actions_prob = ((1 - self.epsilon) / num_greedy_actions) + (self.epsilon / self.num_actions) expected_q = 0 for a in range(self.num_actions): if current_q[a] == max_q: # This is a greedy action expected_q += current_q[a] * greedy_actions_prob else: # This is a non-greedy action expected_q += current_q[a] * non_greedy_actions_prob self.q[self.prev_state,self.prev_action] = self.q[self.prev_state,self.prev_action] + self.step_size*( reward + self.discount*expected_q - self.q[self.prev_state,self.prev_action] ) ### END CODE HERE ### self.prev_state = state self.prev_action = action return action def agent_end(self, reward): """Run when the agent terminates. Args: reward (float): the reward the agent received for entering the terminal state. """ # Perform the last update in the episode (1 line) ### START CODE HERE ### self.q[self.prev_state,self.prev_action] = self.q[self.prev_state,self.prev_action] + self.step_size*( reward - self.q[self.prev_state,self.prev_action] ) ### END CODE HERE ### def argmax(self, q_values): """argmax with random tie-breaking Args: q_values (Numpy array): the array of action-values Returns: action (int): an action with the highest value """ top = float("-inf") ties = [] for i in range(len(q_values)): if q_values[i] > top: top = q_values[i] ties = [] if q_values[i] == top: ties.append(i) return self.rand_generator.choice(ties) ``` ### Test Run the cells below to test the implemented methods. The output of each cell should match the expected output. Note that passing this test does not guarantee correct behavior on the Cliff World. ``` # Do not modify this cell! ## Test Code for agent_start() ## agent_info = {"num_actions": 4, "num_states": 3, "epsilon": 0.1, "step_size": 0.1, "discount": 1.0, "seed": 0} current_agent = ExpectedSarsaAgent() current_agent.agent_init(agent_info) action = current_agent.agent_start(0) print("Action Value Estimates: \n", current_agent.q) print("Action:", action) ``` **Expected Output:** ``` Action Value Estimates: [[0. 0. 0. 0.] [0. 0. 0. 0.] [0. 0. 0. 0.]] Action: 1 ``` ``` # Do not modify this cell! ## Test Code for agent_step() ## actions = [] agent_info = {"num_actions": 4, "num_states": 3, "epsilon": 0.1, "step_size": 0.1, "discount": 1.0, "seed": 0} current_agent = ExpectedSarsaAgent() current_agent.agent_init(agent_info) actions.append(current_agent.agent_start(0)) actions.append(current_agent.agent_step(2, 1)) actions.append(current_agent.agent_step(0, 0)) print("Action Value Estimates: \n", current_agent.q) print("Actions:", actions) ``` **Expected Output:** ``` Action Value Estimates: [[0. 0.2 0. 0. ] [0. 0. 0. 0.0185] [0. 0. 0. 0. ]] Actions: [1, 3, 1] ``` ``` # Do not modify this cell! 
## Test Code for agent_end() ## actions = [] agent_info = {"num_actions": 4, "num_states": 3, "epsilon": 0.1, "step_size": 0.1, "discount": 1.0, "seed": 0} current_agent = ExpectedSarsaAgent() current_agent.agent_init(agent_info) actions.append(current_agent.agent_start(0)) actions.append(current_agent.agent_step(2, 1)) current_agent.agent_end(1) print("Action Value Estimates: \n", current_agent.q) print("Actions:", actions) ``` **Expected Output:** ``` Action Value Estimates: [[0. 0.2 0. 0. ] [0. 0. 0. 0.1] [0. 0. 0. 0. ]] Actions: [1, 3] ``` ## Section 3: Solving the Cliff World We described the Cliff World environment in the video "Expected Sarsa in the Cliff World" in Lesson 3. This is an undiscounted episodic task and thus we set $\gamma$=1. The agent starts in the bottom left corner of the gridworld below and takes actions that move it in the four directions. Actions that would move the agent off of the cliff incur a reward of -100 and send the agent back to the start state. The reward for all other transitions is -1. An episode terminates when the agent reaches the bottom right corner. <img src="cliffworld.png" alt="Drawing" style="width: 600px;"/> Using the experiment program in the cell below we now compare the agents on the Cliff World environment and plot the sum of rewards during each episode for the two agents. The result of this cell will be graded. If you make any changes to your algorithms, you have to run this cell again before submitting the assignment. ``` # Do not modify this cell! agents = { "Q-learning": QLearningAgent, "Expected Sarsa": ExpectedSarsaAgent } env = cliffworld_env.Environment all_reward_sums = {} # Contains sum of rewards during episode all_state_visits = {} # Contains state visit counts during the last 10 episodes agent_info = {"num_actions": 4, "num_states": 48, "epsilon": 0.1, "step_size": 0.5, "discount": 1.0} env_info = {} num_runs = 100 # The number of runs num_episodes = 500 # The number of episodes in each run for algorithm in ["Q-learning", "Expected Sarsa"]: all_reward_sums[algorithm] = [] all_state_visits[algorithm] = [] for run in tqdm(range(num_runs)): agent_info["seed"] = run rl_glue = RLGlue(env, agents[algorithm]) rl_glue.rl_init(agent_info, env_info) reward_sums = [] state_visits = np.zeros(48) # last_episode_total_reward = 0 for episode in range(num_episodes): if episode < num_episodes - 10: # Runs an episode rl_glue.rl_episode(0) else: # Runs an episode while keeping track of visited states state, action = rl_glue.rl_start() state_visits[state] += 1 is_terminal = False while not is_terminal: reward, state, action, is_terminal = rl_glue.rl_step() state_visits[state] += 1 reward_sums.append(rl_glue.rl_return()) # last_episode_total_reward = rl_glue.rl_return() all_reward_sums[algorithm].append(reward_sums) all_state_visits[algorithm].append(state_visits) # save results import os import shutil os.makedirs('results', exist_ok=True) np.save('results/q_learning.npy', all_reward_sums['Q-learning']) np.save('results/expected_sarsa.npy', all_reward_sums['Expected Sarsa']) shutil.make_archive('results', 'zip', '.', 'results') for algorithm in ["Q-learning", "Expected Sarsa"]: plt.plot(np.mean(all_reward_sums[algorithm], axis=0), label=algorithm) plt.xlabel("Episodes") plt.ylabel("Sum of\n rewards\n during\n episode",rotation=0, labelpad=40) plt.xlim(0,500) plt.ylim(-100,0) plt.legend() plt.show() ``` To see why these two agents behave differently, let's inspect the states they visit most. 
Run the cell below to generate plots showing the number of timesteps that the agents spent in each state over the last 10 episodes. ``` # Do not modify this cell! for algorithm, position in [("Q-learning", 211), ("Expected Sarsa", 212)]: plt.subplot(position) average_state_visits = np.array(all_state_visits[algorithm]).mean(axis=0) grid_state_visits = average_state_visits.reshape((4,12)) grid_state_visits[0,1:-1] = np.nan plt.pcolormesh(grid_state_visits, edgecolors='gray', linewidth=2) plt.title(algorithm) plt.axis('off') cm = plt.get_cmap() cm.set_bad('gray') plt.subplots_adjust(bottom=0.0, right=0.7, top=1.0) cax = plt.axes([0.85, 0.0, 0.075, 1.]) cbar = plt.colorbar(cax=cax) cbar.ax.set_ylabel("Visits during\n the last 10\n episodes", rotation=0, labelpad=70) plt.show() ``` The Q-learning agent learns the optimal policy, one that moves along the cliff and reaches the goal in as few steps as possible. However, since the agent does not follow the optimal policy and uses $\epsilon$-greedy exploration, it occasionally falls off the cliff. The Expected Sarsa agent takes exploration into account and follows a safer path. Note this is different from the book. The book shows Sarsa learns the even safer path Previously we used a fixed step-size of 0.5 for the agents. What happens with other step-sizes? Does this difference in performance persist? In the next experiment we will try 10 different step-sizes from 0.1 to 1.0 and compare the sum of rewards per episode averaged over the first 100 episodes (similar to the interim performance curves in Figure 6.3 of the textbook). Shaded regions show standard errors. This cell takes around 10 minutes to run. The result of this cell will be graded. If you make any changes to your algorithms, you have to run this cell again before submitting the assignment. ``` # Do not modify this cell! agents = { "Q-learning": QLearningAgent, "Expected Sarsa": ExpectedSarsaAgent } env = cliffworld_env.Environment all_reward_sums = {} step_sizes = np.linspace(0.1,1.0,10) agent_info = {"num_actions": 4, "num_states": 48, "epsilon": 0.1, "discount": 1.0} env_info = {} num_runs = 100 num_episodes = 100 all_reward_sums = {} for algorithm in ["Q-learning", "Expected Sarsa"]: for step_size in step_sizes: all_reward_sums[(algorithm, step_size)] = [] agent_info["step_size"] = step_size for run in tqdm(range(num_runs)): agent_info["seed"] = run rl_glue = RLGlue(env, agents[algorithm]) rl_glue.rl_init(agent_info, env_info) return_sum = 0 for episode in range(num_episodes): rl_glue.rl_episode(0) return_sum += rl_glue.rl_return() all_reward_sums[(algorithm, step_size)].append(return_sum/num_episodes) for algorithm in ["Q-learning", "Expected Sarsa"]: algorithm_means = np.array([np.mean(all_reward_sums[(algorithm, step_size)]) for step_size in step_sizes]) algorithm_stds = np.array([sem(all_reward_sums[(algorithm, step_size)]) for step_size in step_sizes]) plt.plot(step_sizes, algorithm_means, marker='o', linestyle='solid', label=algorithm) plt.fill_between(step_sizes, algorithm_means + algorithm_stds, algorithm_means - algorithm_stds, alpha=0.2) plt.legend() plt.xlabel("Step-size") plt.ylabel("Sum of\n rewards\n per episode",rotation=0, labelpad=50) plt.xticks(step_sizes) plt.show() ``` ## Wrapping up Expected Sarsa shows an advantage over Q-learning in this problem across a wide range of step-sizes. Congratulations! 
Now you have: - implemented Q-Learning with $\epsilon$-greedy action selection - implemented Expected Sarsa with $\epsilon$-greedy action selection - investigated the behavior of these two algorithms on Cliff World
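As a quick sanity check on the Expected Sarsa update implemented above, the $\epsilon$-greedy expectation can also be computed with vectorized NumPy instead of the explicit per-action loop. The snippet below is a standalone sketch (it does not touch the agent or RLGlue classes); for the state-0 row `[0, 0.2, 0, 0]` it reproduces the expected value 0.185, which, scaled by the 0.1 step-size, is exactly the 0.0185 entry in the `agent_step` test output above.

```
import numpy as np

def expected_sarsa_target(q_row, epsilon):
    """Expected action value of the next state under an epsilon-greedy policy."""
    num_actions = len(q_row)
    greedy_mask = (q_row == np.max(q_row))
    num_greedy = greedy_mask.sum()
    # every action receives the exploration mass ...
    probs = np.full(num_actions, epsilon / num_actions)
    # ... and the greedy actions share the remaining (1 - epsilon)
    probs[greedy_mask] += (1.0 - epsilon) / num_greedy
    return float(np.dot(probs, q_row))

# state-0 action values after the first update in the agent_step test
q_next = np.array([0.0, 0.2, 0.0, 0.0])
print(expected_sarsa_target(q_next, epsilon=0.1))   # 0.185 -> 0.1 * 0.185 = 0.0185
```

Both forms give identical targets; the vectorized version is mainly convenient when the action set is large.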
github_jupyter
# 2. Imperative Programming Languages 우선 2.5까지 나오는 내용 중에서 빼고 살펴보는데, 지난번에 `CMa01.ipynb`에 작성했던 컴파일러 코드에서 문제점을 수정해 보자. --- 컴파일 타겟이 되는 VM의 단순화된 버전을 하스켈로 구현 ``` -- {-# LANGUAGE DeriveFoldable #-} {-# LANGUAGE DeriveFunctor #-} {-# LANGUAGE NoMonomorphismRestriction #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE FlexibleContexts #-} data Instr pa = HALT | NEG | ADD | SUB | MUL | DIV | AND | OR | EQU | NEQ | GR | GEQ | LE | LEQ | POP | DUP | LOADc Int | LOAD -- | LOADr | LOADrc | STORE -- | STOREr | JUMP pa | JUMPz pa | JUMPi pa -- | CALL | RETURN | ENTER | ALLOC | SLIDE | MARK -- | NEW deriving (Eq, Ord, Show, Functor) type CMa = (Code, Stack) type Stack = [Value] type Value = Int -- stack address as reverse index of stack type SA = Int type Code = [Instr PA] -- program address representation newtype PA = PA Code deriving (Eq,Ord,Show) import Data.List data DotDotDot = DotDotDot instance Show DotDotDot where show _ = "..." -- to prevent infinite printing instance {-# OVERLAPS #-} Show Code where show is = "["++intercalate "," (show . fmap (\(PA _) -> DotDotDot) <$> is)++"]" -- to prevent infinite printing instance {-# OVERLAPS #-} Show CMa where show (is,vs) = "{ stack = "++show vs++"\n , code = "++show is++" }" -- load and store operation for Stack load :: SA -> Stack -> Value load i vs = reverse vs !! i store :: SA -> Value -> Stack -> Stack store i x vs = vs1++x:vs2 where (vs1,_:vs2) = splitAt (length vs - 1 - i) vs import Data.Bits step :: CMa -> CMa step (HALT : _, vs) = ([], vs) step (NEG : is, v : vs) = (is, (-v):vs) step (ADD : is, v2:v1:vs) = (is, v1 + v2 : vs) step (SUB : is, v2:v1:vs) = (is, v1 - v2 : vs) step (MUL : is, v2:v1:vs) = (is, v1 * v2 : vs) step (DIV : is, v2:v1:vs) = (is, v1 `div` v2 : vs) step (AND : is, v2:v1:vs) = (is, (v1 .&. v2) : vs) step (OR : is, v2:v1:vs) = (is, (v1 .|. v2) : vs) step (EQU : is, v2:v1:vs) = (is, b2i(v1 == v2) : vs) step (NEQ : is, v2:v1:vs) = (is, b2i(v1 /= v2) : vs) step (GR : is, v2:v1:vs) = (is, b2i(v1 > v2) : vs) step (GEQ : is, v2:v1:vs) = (is, b2i(v1 >= v2) : vs) step (LE : is, v2:v1:vs) = (is, b2i(v1 < v2) : vs) step (LEQ : is, v2:v1:vs) = (is, b2i(v1 <= v2) : vs) step (POP : is, _:vs) = (is, vs) step (DUP : is, v:vs) = (is, v:v:vs) step (LOADc v : is, vs) = (is, v:vs) step (LOAD : is, a:vs) = (is, v:vs) where v = load a vs step (STORE : is, a:n:vs) = (is, n:vs') where vs' = store a n vs step (JUMP (PA c) : _, vs) = (c, vs) step (JUMPz (PA c) : _, 0:vs) = (c, vs) step (JUMPz _ : is, _:vs) = (is, vs) step vm = error $ "VM is stuck: "++show vm i2b 0 = False i2b 1 = True b2i False = 0 b2i True = 1 exec :: CMa -> [CMa] exec vm@([],_) = [vm] exec vm = vm : exec (step vm) run :: CMa -> CMa run = last . exec type LabeledCode = [LabeledInstr] data LabeledInstr = Label :. Instr Label deriving Show type Label = String lbis1 :: LabeledCode lbis1 = [ "" :. LOADc 3 , "loop" :. LOADc 1 , "" :. SUB , "" :. DUP , "" :. JUMPz "end" , "" :. JUMP "loop" , "end" :. HALT ] import Data.Maybe assemble :: LabeledCode -> Code assemble lbis = is' where is' = map (fmap lb2a) is (lbs,is) = unzip [(lb,i) | lb :. i <- lbis] lb2a "" = error "empty string label" lb2a lb = PA $ tails is' !! elemIndex' lb lbs elemIndex' x xs = fromJust (elemIndex x xs) is1 :: Code is1 = [ LOADc 3 ] ++ loop loop = [ LOADc 1 , SUB , DUP , JUMPz (PA end) , JUMP (PA loop) ] ++ end end = [ HALT ] assemble lbis1 is1 mapM_ print . exec $ (is1,[]) mapM_ print . exec $ (assemble lbis1,[]) ``` <br> 이제 책 Fig.2.8 (p.13) 에 나온 C언어 코드를 CMa 명령 코드으로 컴파일하는 함수들을 직접 구현해 보자. 
**식**(expression)을 컴파일하는 `codeR` 및 `codeL`과 **문**(statement)을 컴파일하는 `code`를 하스켈로 작성해 보자. ``` data Expr = Lit Int -- n (integer literal) | Var String -- x | Neg Expr -- -e | Add Expr Expr -- e1 + 2e | Sub Expr Expr -- e1 - e2 | Mul Expr Expr -- e1 * e2 | Div Expr Expr -- e1 / e2 | And Expr Expr -- e1 + e2 | Or Expr Expr -- e1 || e2 | Equ Expr Expr -- e1 == e2 | Neq Expr Expr -- e1 /= e2 | Gr Expr Expr -- e1 > e2 | Geq Expr Expr -- e1 >= e2 | Le Expr Expr -- e1 <= e2 | Leq Expr Expr -- e1 < e2 | Assign Expr Expr -- eL <- eR (assignment expression. 실제 C문법으로는 eL = eR) deriving (Eq,Ord,Show) data Stmt = EStmt Expr -- e; (expression as statement) | Block [Stmt] -- { s1; ...; sn; } | If Expr Stmt (Maybe Stmt) -- if (e) s 또는 if (e) s1 else s0 | While Expr Stmt -- while (e) s | For (Expr,Expr,Expr) Stmt -- for (e1;e2;e3) s deriving (Eq,Ord,Show) [1,2,3] ++ [4,5,6] (4 :) [5,6,7] import Data.Map (Map, (!), (!?)) import qualified Data.Map as Map type AEnv = Map String SA codeR :: Expr -> AEnv -> (Code -> Code) codeR (Lit q) _ = (LOADc q :) codeR (Var x) ρ = codeL (Var x) ρ . (LOAD :) codeR (Neg e) ρ = codeR e ρ . (NEG :) codeR (Add e1 e2) ρ = codeR e1 ρ . codeR e2 ρ . (ADD :) codeR (Sub e1 e2) ρ = codeR e1 ρ . codeR e2 ρ . (SUB :) codeR (Mul e1 e2) ρ = codeR e1 ρ . codeR e2 ρ . (MUL :) codeR (Div e1 e2) ρ = codeR e1 ρ . codeR e2 ρ . (DIV :) codeR (And e1 e2) ρ = codeR e1 ρ . codeR e2 ρ . (AND :) codeR (Or e1 e2) ρ = codeR e1 ρ . codeR e2 ρ . (OR :) codeR (Equ e1 e2) ρ = codeR e1 ρ . codeR e2 ρ . (EQU :) codeR (Neq e1 e2) ρ = codeR e1 ρ . codeR e2 ρ . (NEQ :) codeR (Gr e1 e2) ρ = codeR e1 ρ . codeR e2 ρ . (GR :) codeR (Geq e1 e2) ρ = codeR e1 ρ . codeR e2 ρ . (GEQ :) codeR (Le e1 e2) ρ = codeR e1 ρ . codeR e2 ρ . (LE :) codeR (Leq e1 e2) ρ = codeR e1 ρ . codeR e2 ρ . (LEQ :) codeR (Assign eL eR) ρ = codeR eR ρ . codeL eL ρ . (STORE :) codeR e _ = error $ "R-value not defined: "++show e codeL :: Expr -> AEnv -> (Code -> Code) codeL (Var x) ρ = (LOADc (ρ ! x) :) codeL e _ = error $ "L-value not defined: "++show e code :: Stmt -> AEnv -> (Code -> Code) code (EStmt e) ρ = codeR e ρ . (POP :) code (Block ss) ρ = foldr (.) id [code s ρ | s <- ss] code (If e s Nothing) ρ = \k -> codeR e ρ . (JUMPz (PA k) :) . code s ρ $ k code (If e s1 (Just s0)) ρ = \k -> codeR e ρ . (JUMPz (PA (c0 k)) :) . c1 . (JUMP (PA k) :) . c0 $ k where c1 = code s1 ρ c0 = code s0 ρ code (While e s) ρ = c where c = \k -> codeR e ρ . (JUMPz (PA k) :) . code s ρ . (JUMP (PA (c k)) :) $ k code (For (e1,e2,e3) s) ρ = code (Block ss) ρ where ss = [ EStmt e1 , While e2 $ Block [s, EStmt e3] ] ``` 지금은 변수 메모리 공간은 미리 할당되어 있다고 가정한다. 즉, 적절한 *주소환경*(address environment)과 그에 맞는 크기의 stack으로 시작한다고 가정한다는 말이다. 예컨대, 아래 코드를 컴파일한다면 $\rho = \{x\mapsto 0,\, i\mapsto 1\}$라는 주소환경으로 $x$와 $i$에 값을 저장할 주소를 미리 정해 놓고 초기 스택도 그에 맞춰 미리 크기를 잡아 놓고 시작하기로 하자. ```c int x = 1000; int i = 1; x <- x + i; i <- i + 1; ``` 주소환경과 초기 스택을 적절하게 구성해 놓은 상태로 시작한다면 위 코드는 사실상 아래와 같은 코드를 컴파일하는 것과 같다. ```c x <- 1000; i <- 1; x <- x + i; i <- i + 1; ``` ``` stmt3 = Block [ EStmt $ Assign (Var "x") (Lit 1000) , EStmt $ Assign (Var "i") (Lit 1) , EStmt $ Assign (Var "x") (Add (Var "x") (Var "i")) , EStmt $ Assign (Var "i") (Add (Var "i") (Lit 1)) ] is3 = code stmt3 (Map.fromList [("x",0),("i",1)]) is3 [] is3 [HALT] is3 [DUP,POP,HALT] mapM_ print $ exec (is3 [],[0,0]) run (is3 [],[1,1000]) ``` <br> 이번엔 이 프로그램을 컴파일해 보자. ```c int x = 1000; int i = 1; while (i < 5) { x <- x + i; i <- i + 1; } ``` 마찬가지로 $x$와 $i$에 대한 적절한 주소환경 $\{x\mapsto 0,\,i\mapsto 1\}$과 초기 스택으로 시작한다고 가정한다면 아래 코드를 컴파일하면 되는 것이다. 
```c x <- 1000; i <- 1; while (i < 5) { x <- x + i; i <- i + 1; } ``` ``` stmt41 = Block [ EStmt $ Assign (Var "x") (Lit 1000) -- x <- 1000; , EStmt $ Assign (Var "i") (Lit 1) -- i <- 1; ] stmt42 = Block [ While (Le (Var "i") (Lit 5)) $ Block -- while (i < 5) { [ EStmt $ Assign (Var "x") (Add (Var "x") (Var "i")) -- x <- x + i; , EStmt $ Assign (Var "i") (Add (Var "i") (Lit 1)) -- i <- i + 1; ] -- } ] stmt43 = Block [ EStmt $ Assign (Var "x") (Add (Var "x") (Lit 100)) -- x <- x + 100; , EStmt $ Assign (Var "i") (Add (Var "i") (Lit 100)) -- i <- i + 100; ] rho4 = Map.fromList [("x",0),("i",1)] is41 = code stmt41 rho4 is42 = code stmt42 rho4 is43 = code stmt43 rho4 is41 . is42 $ [] is41 . is42 . is43 $ [] run (is41 . is41 $ [], [0,0]) run (is41 . is42 $ [], [0,0]) run (is41 . is42 . is43 $ [], [0,0]) run (is41 . is43 . is43 $ [], [0,0]) -- stmt43을 두번 실행했으므로 100을 두번씩 더해 200씩 증가 ``` <br> 정리하자면, 컴파일 함수 `codeR`, `codeL`, `code`가 *식*(`Expr`) 또는 *문*(`Stmt`)과 *주소환경*(`AEnv`)을 받아 고정된 코드(`Code`)를 결과로 계산하는 대신, 뒤이어 오는 **나머지 할 일** 코드를 인자로 받아 전체 코드를 계산해내는 코드 변환 함수(`Code -> Code`)를 결과로 계산하도록 수정하였다. 이렇게 함으로써 조건문이나 반복문에서 그 다음 뒤이어 아직 정해지지 않은 코드 위치로 이동하는 코드를 작성하기에 용이해진다. 이렇게 **나머지 할 일**이라는 개념을 전문용어로는 continuation이라고 한다. 순차적으로 진행되지 않는 계산을 표현하기 위한 개념으로 다양한 곳에 활용된다. ``` stmt5 = Block [ EStmt $ Assign (Var "i") (Lit 1) -- i <- 1; , While (Le (Var "i") (Lit 5)) $ -- while (i < 5) EStmt $ Assign (Var "i") (Add (Var "i") (Lit 1)) -- i <- i + 1; , EStmt $ Assign (Var "i") (Add (Var "i") (Lit 1)) -- i <- i + 1; ] c5 = code stmt5 (Map.fromList [("i",0)]) :type c5 c5 [HALT] mapM_ print $ exec (c5 [HALT], [0]) ```
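As an optional cross-check of the compiled CMa programs, here is a minimal Python sketch of the same virtual machine for the instruction subset used in this notebook (LOADc, LOAD, STORE, ADD/SUB/MUL/LEQ, DUP, POP, JUMP, JUMPz, HALT). It is an illustrative analogue rather than part of the Haskell development: it uses plain integer program addresses instead of the code-as-continuation `PA` representation, and models the stack as a Python list whose last element is the top.

```
def run_cma(code, stack):
    """Interpret a list of (opcode, argument) CMa instructions.

    code  : e.g. [("LOADc", 3), ("HALT", None)]; jump targets are instruction indices
    stack : initial stack as a Python list, index 0 = bottom = address 0
    """
    pc = 0
    while pc < len(code):
        op, arg = code[pc]
        pc += 1
        if op == "HALT":
            break
        elif op == "LOADc":
            stack.append(arg)
        elif op == "LOAD":                 # pop an address, push the cell it points to
            a = stack.pop()
            stack.append(stack[a])
        elif op == "STORE":                # pop an address, write the top value, keep it on top
            a = stack.pop()
            stack[a] = stack[-1]
        elif op == "POP":
            stack.pop()
        elif op == "DUP":
            stack.append(stack[-1])
        elif op in ("ADD", "SUB", "MUL", "LEQ"):
            b, a = stack.pop(), stack.pop()
            stack.append({"ADD": a + b, "SUB": a - b,
                          "MUL": a * b, "LEQ": int(a <= b)}[op])
        elif op == "JUMP":
            pc = arg
        elif op == "JUMPz":                # the condition is popped in both branches
            if stack.pop() == 0:
                pc = arg
        else:
            raise ValueError(f"unknown opcode: {op}")
    return stack

# the countdown example from the top of the notebook: push 3, subtract 1 until 0
prog = [("LOADc", 3),                                  # 0
        ("LOADc", 1), ("SUB", None), ("DUP", None),    # 1-3  loop body
        ("JUMPz", 6), ("JUMP", 1),                     # 4-5
        ("HALT", None)]                                # 6
print(run_cma(prog, []))    # -> [0], the same final stack as the exec trace of is1
```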
github_jupyter
## Hi, i was having a hard time trying to load this huge data set as a pandas data frame on my pc, so i searched for alternative ways of doing this as i don't want to pay for cloud services and don't have access to better machines. ### actually the solution was pretty simple, so i'm sharing what i ended up with, maybe i can help other struggling with the same problem. obs: this approach won't let you analyse or summarize the data as pandas data frames would (at least not easily), any criticism or tips are welcomed. ``` import csv from datetime import datetime def clean_data(input_data_path='../input/train.csv', output_data_path='../data/train_cleaned.csv'): """ Clean the data set, removing any row with missing values, delimiter longitudes and latitudes to fit only NY city values, only fare amount greater than 0, and passenger count greater than 0 and lesser than 7, i also removed the header as i'm using tensorflow to load data. :param input_data_path: path containing the raw data set. :param output_data_path: path to write the cleaned data. """ with open(input_data_path, 'r') as inp, open(output_data_path, 'w', newline='') as out: writer = csv.writer(out) count = 0 for row in csv.reader(inp): # Remove header if count > 0: # Only rows with non-null values if len(row) == 8: try: fare_amount = float(row[1]) pickup_longitude = float(row[3]) pickup_latitude = float(row[4]) dropoff_longitude = float(row[5]) dropoff_latitude = float(row[6]) passenger_count = float(row[7]) if ((-76 <= pickup_longitude <= -72) and (-76 <= dropoff_longitude <= -72) and (38 <= pickup_latitude <= 42) and (38 <= dropoff_latitude <= 42) and (1 <= passenger_count <= 6) and fare_amount > 0): writer.writerow(row) except: pass count += 1 def pre_process_train_data(input_data_path='data/train_cleaned.csv', output_data_path='data/train_processed.csv'): """ Pre process the train data, deriving, year, month, day and hour for each row. :param input_data_path: path containing the full data set. :param output_data_path: path to write the pre processed set. """ with open(input_data_path, 'r') as inp, open(output_data_path, 'w', newline='') as out: writer = csv.writer(out) for row in csv.reader(inp): pickup_datetime = datetime.strptime(row[2], '%Y-%m-%d %H:%M:%S %Z') row.append(pickup_datetime.year) row.append(pickup_datetime.month) row.append(pickup_datetime.day) row.append(pickup_datetime.hour) row.append(pickup_datetime.weekday()) writer.writerow(row) def pre_process_test_data(input_data_path='data/test.csv', output_data_path='data/test_processed.csv'): """ Pre process the test data, deriving, year, month, day and hour for each row. :param input_data_path: path containing the full data set. :param output_data_path: path to write the pre processed set. """ with open(input_data_path, 'r') as inp, open(output_data_path, 'w', newline='') as out: writer = csv.writer(out) count = 0 for row in csv.reader(inp): if count > 0: pickup_datetime = datetime.strptime(row[1], '%Y-%m-%d %H:%M:%S %Z') row.append(pickup_datetime.year) row.append(pickup_datetime.month) row.append(pickup_datetime.day) row.append(pickup_datetime.hour) row.append(pickup_datetime.weekday()) writer.writerow(row) else: # Only the header writer.writerow(row) count += 1 def split_data(input_data_path, train_data_path, validation_data_path, ratio=30): """ Splits the csv file (meant to generate train and validation sets). :param input_data_path: path containing the full data set. :param train_data_path: path to write the train set. 
:param validation_data_path: path to write the validation set. :param ratio: ration to split train and validation sets, (default: 1 of every 30 rows will be validation or 0,033%) """ with open(input_data_path, 'r') as inp, open(train_data_path, 'w', newline='') as out1, \ open(validation_data_path, 'w', newline='') as out2: writer1 = csv.writer(out1) writer2 = csv.writer(out2) count = 0 for row in csv.reader(inp): if count % ratio == 0: writer2.writerow(row) else: writer1.writerow(row) count += 1 ```
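For completeness, the functions above would typically be chained as below. The paths are placeholders (adjust them to your own directory layout), and each step streams the CSV row by row, so memory use stays flat even for the full multi-gigabyte train file.

```
# Example end-to-end driver; all paths are assumptions about the local layout.
clean_data(input_data_path='../input/train.csv',
           output_data_path='../data/train_cleaned.csv')

pre_process_train_data(input_data_path='../data/train_cleaned.csv',
                       output_data_path='../data/train_processed.csv')

pre_process_test_data(input_data_path='../input/test.csv',
                      output_data_path='../data/test_processed.csv')

# 1 of every 30 processed rows goes to the validation file
split_data(input_data_path='../data/train_processed.csv',
           train_data_path='../data/train_split.csv',
           validation_data_path='../data/validation_split.csv',
           ratio=30)
```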
github_jupyter
# Randomized Benchmarking ## Contents 1. [Introduction](#intro) 2. [The Randomized Benchmarking Protocol](#protocol) 3. [The Intuition Behind RB](#intuition) 4. [Simultaneous Randomized Benchmarking](#simultaneousrb) 5. [Predicted Gate Fidelity](#predicted-gate-fidelity) 6. [References](#references) ## 1. Introduction <a id='intro'></a> One of the main challenges in building a quantum information processor is the non-scalability of completely characterizing the noise affecting a quantum system via process tomography. In addition, process tomography is sensitive to noise in the pre- and post rotation gates plus the measurements (SPAM errors). Gateset tomography can take these errors into account, but the scaling is even worse. A complete characterization of the noise is useful because it allows for the determination of good error-correction schemes, and thus the possibility of reliable transmission of quantum information. Since complete process tomography is infeasible for large systems, there is growing interest in scalable methods for partially characterizing the noise affecting a quantum system. A scalable (in the number $n$ of qubits comprising the system) and robust algorithm for benchmarking the full set of Clifford gates by a single parameter using randomization techniques was presented in [1]. The concept of using randomization methods for benchmarking quantum gates is commonly called **Randomized Benchmarking (RB)**. ## 2. The Randomized Benchmarking Protocol <a id='protocol'></a> We should first import the relevant qiskit classes for the demonstration: ``` # Import general libraries (needed for functions) import numpy as np import matplotlib.pyplot as plt from IPython import display # Import the RB Functions import qiskit.ignis.verification.randomized_benchmarking as rb # Import Qiskit classes import qiskit from qiskit import assemble, transpile from qiskit.providers.aer.noise import NoiseModel from qiskit.providers.aer.noise.errors.standard_errors import depolarizing_error, thermal_relaxation_error ``` A RB protocol (see [1,2]) consists of the following steps: ### Step 1: Generate RB sequences The RB sequences consist of random Clifford elements chosen uniformly from the Clifford group on $n$-qubits, including a computed reversal element, that should return the qubits to the initial state. More precisely, for each length $m$, we choose $K_m$ RB sequences. Each such sequence contains $m$ random elements $C_{i_j}$ chosen uniformly from the Clifford group on $n$-qubits, and the $m+1$ element is defined as follows: $C_{i_{m+1}} = (C_{i_1}\cdot ... \cdot C_{i_m})^{-1}$. It can be found efficiently by the Gottesmann-Knill theorem. For example, we generate below several sequences of 2-qubit Clifford circuits. ``` # Generate RB circuits (2Q RB) # number of qubits nQ = 2 rb_opts = {} #Number of Cliffords in the sequence rb_opts['length_vector'] = [1, 10, 20, 50, 75, 100, 125, 150, 175, 200] # Number of seeds (random sequences) rb_opts['nseeds'] = 5 # Default pattern rb_opts['rb_pattern'] = [[0, 1]] rb_circs, xdata = rb.randomized_benchmarking_seq(**rb_opts) ``` As an example, we print the circuit corresponding to the first RB sequence ``` rb_circs[0][0].draw() ``` One can verify that the Unitary representing each RB circuit should be the identity (with a global phase). We simulate this using Aer unitary simulator. 
``` # Create a new circuit without the measurement qregs = rb_circs[0][-1].qregs cregs = rb_circs[0][-1].cregs qc = qiskit.QuantumCircuit(*qregs, *cregs) for i in rb_circs[0][-1][0:-nQ]: qc.data.append(i) # The Unitary is an identity (with a global phase) sim = qiskit.Aer.get_backend('aer_simulator') basis_gates = ['u1','u2','u3','cx'] # use U,CX for now qc.save_unitary() unitary = sim.run(qc).result().get_unitary() from qiskit.visualization import array_to_latex array_to_latex(unitary, prefix="\\text{Unitary} = ") ``` ### Step 2: Execute the RB sequences (with some noise) We can execute the RB sequences either using Qiskit Aer Simulator (with some noise model) or using IBMQ provider, and obtain a list of results. By assumption each operation $C_{i_j}$ is allowed to have some error, represented by $\Lambda_{i_j,j}$, and each sequence can be modeled by the operation: $$\textit{S}_{\textbf{i}_\textbf{m}} = \bigcirc_{j=1}^{m+1} (\Lambda_{i_j,j} \circ C_{i_j})$$ where ${\textbf{i}_\textbf{m}} = (i_1,...,i_m)$ and $i_{m+1}$ is uniquely determined by ${\textbf{i}_\textbf{m}}$. ``` # Run on a noisy simulator noise_model = NoiseModel() # Depolarizing error on the gates u2, u3 and cx (assuming the u1 is virtual-Z gate and no error) p1Q = 0.002 p2Q = 0.01 noise_model.add_all_qubit_quantum_error(depolarizing_error(p1Q, 1), 'u2') noise_model.add_all_qubit_quantum_error(depolarizing_error(2 * p1Q, 1), 'u3') noise_model.add_all_qubit_quantum_error(depolarizing_error(p2Q, 2), 'cx') backend = qiskit.Aer.get_backend('aer_simulator') ``` ### Step 3: Get statistics about the survival probabilities For each of the $K_m$ sequences the survival probability $Tr[E_\psi \textit{S}_{\textbf{i}_\textbf{m}}(\rho_\psi)]$ is measured. Here $\rho_\psi$ is the initial state taking into account preparation errors and $E_\psi$ is the POVM element that takes into account measurement errors. In the ideal (noise-free) case $\rho_\psi = E_\psi = | \psi {\rangle} {\langle} \psi |$. In practice one can measure the probability to go back to the exact initial state, i.e. all the qubits in the ground state $ {|} 00...0 {\rangle}$ or just the probability for one of the qubits to return back to the ground state. Measuring the qubits independently can be more convenient if a correlated measurement scheme is not possible. Both measurements will fit to the same decay parameter according to the properties of the *twirl*. ### Step 4: Find the averaged sequence fidelity Average over the $K_m$ random realizations of the sequence to find the averaged sequence **fidelity**, $$F_{seq}(m,|\psi{\rangle}) = Tr[E_\psi \textit{S}_{K_m}(\rho_\psi)]$$ where $$\textit{S}_{K_m} = \frac{1}{K_m} \sum_{\textbf{i}_\textbf{m}} \textit{S}_{\textbf{i}_\textbf{m}}$$ is the average sequence operation. ### Step 5: Fit the results Repeat Steps 1 through 4 for different values of $m$ and fit the results for the averaged sequence fidelity to the model: $$ \textit{F}_{seq}^{(0)} \big(m,{|}\psi {\rangle} \big) = A_0 \alpha^m +B_0$$ where $A_0$ and $B_0$ absorb state preparation and measurement errors as well as an edge effect from the error on the final gate. $\alpha$ determines the average error-rate $r$, which is also called **Error per Clifford (EPC)** according to the relation $$ r = 1-\alpha-\frac{1-\alpha}{2^n} = \frac{2^n-1}{2^n}(1-\alpha)$$ (where $n=nQ$ is the number of qubits). As an example, we calculate the average sequence fidelity for each of the RB sequences, fit the results to the exponential curve, and compute the parameters $\alpha$ and EPC. 
``` # Create the RB fitter backend = qiskit.Aer.get_backend('aer_simulator') basis_gates = ['u1','u2','u3','cx'] shots = 200 transpiled_circs_list = [] rb_fit = rb.RBFitter(None, xdata, rb_opts['rb_pattern']) for rb_seed, rb_circ_seed in enumerate(rb_circs): print(f'Compiling seed {rb_seed}') new_rb_circ_seed = qiskit.compiler.transpile(rb_circ_seed, basis_gates=basis_gates) transpiled_circs_list.append(new_rb_circ_seed) print(f'Simulating seed {rb_seed}') qobj = assemble(new_rb_circ_seed, shots=shots) job = backend.run(qobj, noise_model=noise_model, max_parallel_experiments=0) # Add data to the fitter rb_fit.add_data(job.result()) print('After seed %d, alpha: %f, EPC: %f'%(rb_seed,rb_fit.fit[0]['params'][1], rb_fit.fit[0]['epc'])) ``` ### Extra Step: Plot the results ``` plt.figure(figsize=(8, 6)) ax = plt.subplot(1, 1, 1) # Plot the essence by calling plot_rb_data rb_fit.plot_rb_data(0, ax=ax, add_label=True, show_plt=False) # Add title and label ax.set_title('%d Qubit RB'%(nQ), fontsize=18) plt.show() ``` ## 3. The Intuition Behind RB <a id='intuition'></a> The depolarizing quantum channel has a parameter $\alpha$, and works like this: with probability $\alpha$, the state remains the same as before; with probability $1-\alpha$, the state becomes the totally mixed state, namely: $$\rho_f = \alpha \rho_i + \frac{1-\alpha}{2^n} * \mathbf{I}$$ Suppose that we have a sequence of $m$ gates, not necessarily Clifford gates, where the error channel of the gates is a depolarizing channel with parameter $\alpha$ (same $\alpha$ for all the gates). Then with probability $\alpha^m$ the state is correct at the end of the sequence, and with probability $1-\alpha^m$ it becomes the totally mixed state, therefore: $$\rho_f^m = \alpha^m \rho_i + \frac{1-\alpha^m}{2^n} * \mathbf{I}$$ Now suppose that in addition we start with the ground state; that the entire sequence amounts to the identity; and that we measure the state at the end of the sequence with the standard basis. We derive that the probability of success at the end of the sequence is: $$\alpha^m + \frac{1-\alpha^m}{2^n} = \frac{2^n-1}{2^n}\alpha^m + \frac{1}{2^n} = A_0\alpha^m + B_0$$ It follows that the probability of success, aka fidelity, decays exponentially with the sequence length, with exponent $\alpha$. The last statement is not necessarily true when the channel is other than the depolarizing channel. However, it turns out that if the gates are uniformly-randomized Clifford gates, then the noise of each gate behaves on average as if it was the depolarizing channel, with some parameter that can be computed from the channel, and we obtain the exponential decay of the fidelity. Formally, taking an average over a finite group $G$ (like the Clifford group) of a quantum channel $\bar \Lambda$ is also called a *twirl*: $$ W_G(\bar \Lambda) \frac{1}{|G|} \sum_{u \in G} U^{\dagger} \circ \bar \Lambda \circ U$$ Twirling over the entire unitary group yields exactly the same result as the Clifford group. The Clifford group is a *2-design* of the unitary group. ## 4. Simultaneous Randomized Benchmarking <a id='simultaneousrb'></a> RB is designed to address fidelities in multiqubit systems in two ways. For one, RB over the full $n$-qubit space can be performed by constructing sequences from the $n$-qubit Clifford group. Additionally, the $n$-qubit space can be subdivided into sets of qubits $\{n_i\}$ and $n_i$-qubit RB performed in each subset simultaneously [4]. Both methods give metrics of fidelity in the $n$-qubit space. 
For example, it is common to perform 2Q RB on the subset of two-qubits defining a CNOT gate while the other qubits are quiescent. As explained in [4], this RB data will not necessarily decay exponentially because the other qubit subspaces are not twirled. Subsets are more rigorously characterized by simultaneous RB, which also measures some level of crosstalk error since all qubits are active. An example of simultaneous RB (1Q RB and 2Q RB) can be found in: https://github.com/Qiskit/qiskit-tutorials/blob/master/tutorials/noise/4_randomized_benchmarking.ipynb ## 5. Predicted Gate Fidelity <a id='predicted-gate-fidelity'></a> If we know the errors on the underlying gates (the gateset) we can predict the EPC without running RB experiment. This calculation verifies that your RB experiment followed by fitting yields correct EPC value. First we need to count the number of these gates per Clifford. Then, the two qubit Clifford gate error function ``calculate_2q_epc`` gives the error per 2Q Clifford. It assumes that the error in the underlying gates is depolarizing. This function is derived in the supplement to [5]. ``` # count the number of single and 2Q gates in the 2Q Cliffords qubits = rb_opts['rb_pattern'][0] gate_per_cliff = rb.rb_utils.gates_per_clifford( transpiled_circuits_list=transpiled_circs_list, clifford_lengths=xdata[0], basis=basis_gates, qubits=qubits) for basis_gate in basis_gates: print("Number of %s gates per Clifford: %f"%( basis_gate, np.mean([gate_per_cliff[qubit][basis_gate] for qubit in qubits]))) # convert from depolarizing error to epg (1Q) epg_q0 = {'u1': 0, 'u2': p1Q/2, 'u3': 2 * p1Q/2} epg_q1 = {'u1': 0, 'u2': p1Q/2, 'u3': 2 * p1Q/2} # convert from depolarizing error to epg (2Q) epg_q01 = 3/4 * p2Q # calculate the predicted epc from underlying gate errors pred_epc = rb.rb_utils.calculate_2q_epc( gate_per_cliff=gate_per_cliff, epg_2q=epg_q01, qubit_pair=qubits, list_epgs_1q=[epg_q0, epg_q1]) print("Predicted 2Q Error per Clifford: %e (qasm simulator result: %e)" % (pred_epc, rb_fit.fit[0]['epc'])) ``` On the other hand, we can calculate the errors on the underlying gates (the gateset) from the experimentally obtained EPC. Given that we know the errors on the every single-qubit gates in the RB sequence, we can predict 2Q gate error from the EPC of two qubit RB experiment. The two qubit gate error function ``calculate_2q_epg`` gives the estimate of error per 2Q gate. In this section we prepare single-qubit errors using the deporalizing error model. If the error model is unknown, EPGs of those gates, for example [``u1``, ``u2``, ``u3``], can be estimated with a separate 1Q RB experiment with the utility function ``calculate_1q_epg``. ``` # use 2Q EPC from qasm simulator result and 1Q EPGs from depolarizing error model pred_epg = rb.rb_utils.calculate_2q_epg( gate_per_cliff=gate_per_cliff, epc_2q=rb_fit.fit[0]['epc'], qubit_pair=qubits, list_epgs_1q=[epg_q0, epg_q1]) print("Predicted 2Q Error per gate: %e (gate error model: %e)" % (pred_epg, epg_q01)) ``` ## 6. References <a id='references'></a> 1. Easwar Magesan, J. M. Gambetta, and Joseph Emerson, *Robust randomized benchmarking of quantum processes*, https://arxiv.org/pdf/1009.3639 2. Easwar Magesan, Jay M. Gambetta, and Joseph Emerson, *Characterizing Quantum Gates via Randomized Benchmarking*, https://arxiv.org/pdf/1109.6887 3. A. D. C'orcoles, Jay M. Gambetta, Jerry M. Chow, John A. Smolin, Matthew Ware, J. D. Strand, B. L. T. Plourde, and M. 
Steffen, *Process verification of two-qubit quantum gates by randomized benchmarking*, https://arxiv.org/pdf/1210.7011
4. Jay M. Gambetta, A. D. Córcoles, S. T. Merkel, B. R. Johnson, John A. Smolin, Jerry M. Chow, Colm A. Ryan, Chad Rigetti, S. Poletto, Thomas A. Ohki, Mark B. Ketchen, and M. Steffen, *Characterization of addressability by simultaneous randomized benchmarking*, https://arxiv.org/pdf/1204.6308
5. David C. McKay, Sarah Sheldon, John A. Smolin, Jerry M. Chow, and Jay M. Gambetta, *Three Qubit Randomized Benchmarking*, https://arxiv.org/pdf/1712.06550

```
import qiskit.tools.jupyter
%qiskit_version_table
```
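As a small appendix to Steps 4–5 and Section 5 above, the relation between the depolarizing parameter $\alpha$, the error per Clifford, and the fitted decay curve can be checked numerically without running any circuits. This is a plain-NumPy sketch that uses no Qiskit objects; the `spam` factor is an illustrative stand-in for the state-preparation and measurement imperfections absorbed by $A_0$.

```
import numpy as np

def alpha_to_epc(alpha, n_qubits):
    """Error per Clifford r = (2^n - 1)/2^n * (1 - alpha), as in Step 5."""
    d = 2 ** n_qubits
    return (d - 1) / d * (1 - alpha)

def epc_to_alpha(epc, n_qubits):
    """Inverse of the relation above."""
    d = 2 ** n_qubits
    return 1 - epc * d / (d - 1)

def rb_decay(m, alpha, n_qubits, spam=1.0):
    """Idealized survival probability A0 * alpha^m + B0 for sequence length m."""
    d = 2 ** n_qubits
    A0 = spam * (d - 1) / d      # spam < 1 mimics SPAM errors absorbed by A0
    B0 = 1 / d
    return A0 * np.power(alpha, m) + B0

alpha = 0.95
epc = alpha_to_epc(alpha, n_qubits=2)
print(epc)                             # 0.0375
print(epc_to_alpha(epc, n_qubits=2))   # recovers 0.95
print(rb_decay(np.array([1, 10, 100]), alpha, n_qubits=2))
```

For two qubits this is the same 3/4 factor that appears in the `epg_q01 = 3/4 * p2Q` conversion of Section 5, since a depolarizing error of strength $p$ corresponds to $1-\alpha = p$ in the intuition of Section 3.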
github_jupyter
``` # default_exp datasets #export from fastai.text import * from tse.preprocessing import * from tse.tokenizers import * ``` ### Prepare Data Inputs for Q/A Following for each input for training is needed: `input_ids`, `attention_mask`, `token_type_ids`, `offsets`, `answer_text`, `start_tok_idx`, `end_tok_idx` Preprocess ``` train_df = pd.read_csv("../data/train.csv").dropna().reset_index(drop=True) test_df = pd.read_csv("../data/test.csv") strip_text(train_df, "text") strip_text(train_df, "selected_text") strip_text(test_df, "text") replace_whitespace(train_df, "text") replace_whitespace(train_df, "selected_text") replace_whitespace(test_df, "text") replace_URLs(train_df, "text") replace_URLs(train_df, "selected_text") replace_URLs(test_df, "text") replace_user(train_df, "text") replace_user(train_df, "selected_text") replace_user(test_df, "text") is_wrong = train_df.apply(lambda o: is_wrong_selection(o['text'], o['selected_text']), 1) train_df = train_df[~is_wrong].reset_index(drop=True) list(train_df['text']) train_df.shape ``` Tokenizer ``` tokenizer = init_roberta_tokenizer("../roberta-base/vocab.json", "../roberta-base/merges.txt", max_length=192) train_df.head() #export def get_start_end_idxs(context, answer): "Get string start and end char for answer span" len_a = len(answer) for i, _ in enumerate(context): if context[i:i+len_a] == answer: start_idx, end_idx = i, i+len_a-1 return start_idx, end_idx raise Exception("No overlapping segment found") #export def get_start_end_tok_idxs(offsets, start_idx, end_idx): "Generate target from tokens - first 4 tokens belong to question" start_tok_idx, end_tok_idx = None, None for tok_idx, off in enumerate(offsets[4:]): if (off[0] <= start_idx) & (off[1] > start_idx): start_tok_idx = tok_idx + 4 if (off[0] <= end_idx) & (off[1] > end_idx): end_tok_idx = tok_idx + 4 return (start_tok_idx, end_tok_idx) trn_stxt, trn_txt, trn_sent = train_df.selected_text.values, train_df.text.values, train_df.sentiment test_txt, test_sent = test_df.text.values, test_df.sentiment.values train_tok_input = list(tuple(zip(trn_sent, trn_txt))) test_tok_input = list(tuple(zip(test_sent, test_txt))) # encode batch train_outputs = tokenizer.encode_batch(train_tok_input) test_outputs = tokenizer.encode_batch(test_tok_input) start_end_idxs = [get_start_end_idxs(s1,s2) for (s1,s2) in zip(trn_txt, trn_stxt)] #export class QAInputGenerator: def __init__(self, contexts, questions, text_ids=None, answers=None, tokenizer=None): self.contexts, self.questions, self.answers = contexts, questions, answers self.outputs = tokenizer.encode_batch(list(tuple(zip(questions, contexts)))) if text_ids is not None: self.text_ids = text_ids if self.answers is not None: self.start_end_idxs = [get_start_end_idxs(s1,s2) for (s1,s2) in zip(self.contexts, self.answers)] @classmethod def from_df(cls, df, ctx_col='text', q_col='sentiment', id_col='textID', ans_col='selected_text', is_test=False, tokenizer=None): contexts = df[ctx_col].values questions = df[q_col].values text_ids = None if id_col is None else df[id_col].values answers = None if is_test else df[ans_col].values return cls(contexts, questions, text_ids, answers, tokenizer) def __getitem__(self, i): input_ids = array(self.outputs[i].ids) attention_mask = array(self.outputs[i].attention_mask) offsets = array(self.outputs[i].offsets) tokens = array(self.outputs[i].tokens) res = {"input_ids": input_ids, "attention_mask": attention_mask, "offsets": offsets, "tokens": tokens, "context_text": self.contexts[i]} if self.answers is not None: 
answer_text = self.answers[i] start_tok_idx, end_tok_idx = get_start_end_tok_idxs(offsets, *self.start_end_idxs[i]) res["answer_text"] = answer_text res["start_end_tok_idxs"] = (start_tok_idx, end_tok_idx) if self.text_ids is not None: text_id = self.text_ids[i] res["text_id"] = text_id return res def __len__(self): return len(self.contexts) train_inputs = QAInputGenerator.from_df(train_df, tokenizer=tokenizer) test_inputs = QAInputGenerator.from_df(test_df, is_test=True, tokenizer=tokenizer) i = np.random.choice(range(len(train_inputs))) print(train_inputs[i].keys()) print(train_inputs[i]['tokens'][train_inputs[i]['start_end_tok_idxs'][0]:train_inputs[i]['start_end_tok_idxs'][1]+1]) print(train_inputs[i]['answer_text']) i = np.random.choice(range(len(test_inputs))) print(test_inputs[i].keys()) print(test_inputs[i]['tokens'][test_inputs[i]['attention_mask'].astype(bool)]) train_inputs = list(train_inputs) test_inputs = list(test_inputs) len(train_inputs), len(test_inputs) ``` ### TSEDataAugmentor #### 1) Random Left - Right Truncate ``` -> tok3 anstok anstok anstok tok7 (rand left and right idxs) -> tok3 anstok anstok anstok tok7 tok8 (rand left idx) -> Tok1 tok2 tok3 anstok anstok anstok tok7 (rand right idx) ``` #### 2) Random Mask ``` -> Tok1 tok2 <MASK> anstok anstok anstok tok7 <MASK> -> Tok1 tok2 <UNK> anstok anstok anstok tok7 <UNK> ``` #### 3) Replace with pseudolabel ``` #export class TSEDataAugmentor: def __init__(self, tokenizer, input_ids, attention_mask, start_position, end_position): self.tokenizer = tokenizer self.input_ids = input_ids self.attention_mask = attention_mask # initial answer start and end positions self.ans_start_pos, self.ans_end_pos = start_position.item(), end_position.item() # context token start and end excluding bos - eos tokens self.context_start_pos = 4 self.context_end_pos = torch.where(attention_mask)[0][-1].item() - 1 # left and right indexes excluding answer tokens and eos token @property def left_idxs(self): return np.arange(self.context_start_pos, self.ans_start_pos) @property def right_idxs(self): return np.arange(self.ans_end_pos+1, self.context_end_pos+1) @property def left_right_idxs(self): return np.concatenate([self.left_idxs, self.right_idxs]) @property def rand_left_idx(self): return np.random.choice(self.left_idxs) if self.left_idxs.size > 0 else None @property def rand_right_idx(self): return np.random.choice(self.right_idxs) if self.right_idxs.size > 0 else None def right_truncate(self, right_idx): """ Truncate context from random right index to beginning, answer pos doesn't change Note: token_type_ids NotImplemented """ if not right_idx: raise Exception("Right index can't be None") # clone for debugging new_input_ids = self.input_ids.clone() nopad_input_ids = new_input_ids[self.attention_mask.bool()] # truncate from right idx to beginning - add eos_token_id to end truncated = torch.cat([nopad_input_ids[:right_idx+1], tensor([self.tokenizer.eos_token_id])]) # pad new context until size are equal # replace original input context with new n_pad = len(nopad_input_ids) - len(truncated) new_context = F.pad(truncated, (0,n_pad), value=self.tokenizer.pad_token_id) new_input_ids[:self.context_end_pos+2] = new_context # find new attention mask, update new context end position (exclude eos token) # Note: context start doesn't change since we don't manipulate question new_attention_mask = tensor([1 if i != 1 else 0 for i in new_input_ids]) new_context_end_pos = torch.where(new_attention_mask)[0][-1].item() - 1 self.context_end_pos = 
new_context_end_pos # update input_ids and attention_masks self.input_ids = new_input_ids self.attention_mask = new_attention_mask return self.input_ids, self.attention_mask, (tensor(self.ans_start_pos), tensor(self.ans_end_pos)) def random_right_truncate(self): right_idx = self.rand_right_idx if right_idx: self.right_truncate(right_idx) def left_truncate(self, left_idx): """ Truncate context from random left index to end, answer pos changes too Note: token_type_ids NotImplemented """ if not left_idx: raise Exception("Left index can't be None") # clone for debugging new_input_ids = self.input_ids.clone() # pad new context until size are equal # replace original input context with new n_pad = len(new_input_ids[self.context_start_pos:]) - len(new_input_ids[left_idx:]) new_context = F.pad(new_input_ids[left_idx:], (0,n_pad), value=self.tokenizer.pad_token_id) new_input_ids[self.context_start_pos:] = new_context # find new attention mask, update new context end position (exclude eos token) # Note: context start doesn't change since we don't manipulate question new_attention_mask = tensor([1 if i != 1 else 0 for i in new_input_ids]) new_context_end_pos = torch.where(new_attention_mask)[0][-1].item() - 1 self.context_end_pos = new_context_end_pos # find new answer start and end positions # update new answer start and end positions ans_shift = left_idx - self.context_start_pos self.ans_start_pos, self.ans_end_pos = self.ans_start_pos-ans_shift, self.ans_end_pos-ans_shift # update input_ids and attention_masks self.input_ids = new_input_ids self.attention_mask = new_attention_mask return self.input_ids, self.attention_mask, (tensor(self.ans_start_pos), tensor(self.ans_end_pos)) def random_left_truncate(self): left_idx = self.rand_left_idx if left_idx: self.left_truncate(left_idx) def replace_with_mask(self, idxs_to_mask): """ Replace given input ids with tokenizer.mask_token_id """ # clone for debugging new_input_ids = self.input_ids.clone() new_input_ids[idxs_to_mask] = tensor([self.tokenizer.mask_token_id]*len(idxs_to_mask)) self.input_ids = new_input_ids def random_replace_with_mask(self, mask_p=0.2): """ mask_p: Proportion of tokens to replace with mask token id """ idxs_to_mask = np.random.choice(self.left_right_idxs, int(len(self.left_right_idxs)*mask_p)) if idxs_to_mask.size > 0: self.replace_with_mask(idxs_to_mask) i = np.random.choice(range(len(train_inputs))) input_ids = tensor(train_inputs[i]['input_ids']) attention_mask = tensor(train_inputs[i]['attention_mask']) start_position, end_position = train_inputs[i]['start_end_tok_idxs'] start_position, end_position = tensor(start_position), tensor(end_position) answer_text = train_inputs[i]['answer_text'] context_text = train_inputs[i]['context_text'] offsets = train_inputs[i]['offsets'] input_ids[attention_mask.bool()] start_position, end_position answer_text, context_text, start_position.item(), end_position.item() " ".join([tokenizer.id_to_token(o) for o in input_ids[attention_mask.bool()]]) " ".join([tokenizer.id_to_token(o) for o in input_ids[start_position.item(): end_position.item()+1]]) char_start = min(np.concatenate([offsets[start_position.item()], offsets[end_position.item()]])) char_end = max(np.concatenate([offsets[start_position.item()], offsets[end_position.item()]])) context_text[char_start:char_end] def convert_ids_to_tokens(toks): return [tokenizer.id_to_token(o) for o in toks] tokenizer.convert_ids_to_tokens = convert_ids_to_tokens ``` ### demo right truncate ``` da = TSEDataAugmentor(tokenizer, input_ids, attention_mask, 
start_position, end_position) da.random_right_truncate() print(" ".join(tokenizer.convert_ids_to_tokens(da.input_ids[da.attention_mask.bool()]))) print() print(" ".join(tokenizer.convert_ids_to_tokens(da.input_ids[da.ans_start_pos :da.ans_end_pos+1]))) ``` ### demo left truncate ``` da = TSEDataAugmentor(tokenizer, input_ids, attention_mask, start_position, end_position) da.random_left_truncate() print(" ".join(tokenizer.convert_ids_to_tokens(da.input_ids[da.attention_mask.bool()]))) print() print(" ".join(tokenizer.convert_ids_to_tokens(da.input_ids[da.ans_start_pos :da.ans_end_pos+1]))) da.ans_start_pos, da.ans_end_pos ``` ### demo replace with mask ``` da = TSEDataAugmentor(tokenizer, input_ids, attention_mask, start_position, end_position) da.random_replace_with_mask(0.2) print(" ".join(tokenizer.convert_ids_to_tokens(da.input_ids[da.attention_mask.bool()]))) print() print(" ".join(tokenizer.convert_ids_to_tokens(da.input_ids[da.ans_start_pos :da.ans_end_pos+1]))) da.left_idxs, da.right_idxs ``` ### demo all ``` da = TSEDataAugmentor(tokenizer, input_ids, attention_mask, start_position, end_position) da.random_left_truncate() da.random_right_truncate() da.random_replace_with_mask(0.3) print(" ".join(tokenizer.convert_ids_to_tokens(da.input_ids[da.attention_mask.bool()]))) print() print(" ".join(tokenizer.convert_ids_to_tokens(da.input_ids[da.ans_start_pos :da.ans_end_pos+1]))) ``` ### TSEDataset ``` #export do_tfms = {} do_tfms["random_left_truncate"] = {"p":.3} do_tfms["random_right_truncate"] = {"p":.3} do_tfms["random_replace_with_mask"] = {"p":.3, "mask_p":0.2} do_tfms["random_replace_with_pseudo"] = {"p":.3} do_tfms pseudo_df = pd.read_csv("../data/pseudo_labels/pseudo_labelled_sample.csv") pseudo_df = pseudo_df[['ids', 'text', 'target', 'predicted_answer']] pseudo_df.head() pseudo_df.shape #export class TSEDataset(Dataset): def __init__(self, inputs, tokenizer=None, is_test=False, do_tfms:Dict=None, pseudo_inputs=None): # eval self.inputs = inputs # augmentation self.is_test = is_test self.tokenizer = tokenizer self.do_tfms = do_tfms self.pseudo_inputs = pseudo_inputs if self.pseudo_inputs: self.pseudo_idxs = list(range(len(self.pseudo_inputs))) def __getitem__(self, i): 'fastai requires (xb, yb) to return' input_ids = tensor(self.inputs[i]['input_ids']) attention_mask = tensor(self.inputs[i]['attention_mask']) if not self.is_test: start_position, end_position = self.inputs[i]['start_end_tok_idxs'] start_position, end_position = tensor(start_position), tensor(end_position) if self.do_tfms: if self.pseudo_inputs and (np.random.uniform() < self.do_tfms["random_replace_with_pseudo"]["p"]): rand_idx = np.random.choice(self.pseudo_idxs) input_ids = tensor(self.pseudo_inputs[rand_idx]['input_ids']) attention_mask = tensor(self.pseudo_inputs[rand_idx]['attention_mask']) start_position, end_position = self.pseudo_inputs[rand_idx]['start_end_tok_idxs'] start_position, end_position = tensor(start_position), tensor(end_position) else: augmentor = TSEDataAugmentor(self.tokenizer, input_ids, attention_mask, start_position, end_position) if np.random.uniform() < self.do_tfms["random_left_truncate"]["p"]: augmentor.random_left_truncate() if np.random.uniform() < self.do_tfms["random_right_truncate"]["p"]: augmentor.random_right_truncate() if np.random.uniform() < self.do_tfms["random_replace_with_mask"]["p"]: augmentor.random_replace_with_mask(self.do_tfms["random_replace_with_mask"]["mask_p"]) input_ids = augmentor.input_ids attention_mask = augmentor.attention_mask start_position, 
end_position = tensor(augmentor.ans_start_pos), tensor(augmentor.ans_end_pos) xb = (input_ids, attention_mask) if not self.is_test: yb = (start_position, end_position) else: yb = (0,0) return xb, yb def __len__(self): return len(self.inputs) #export do_tfms = {} do_tfms["random_left_truncate"] = {"p":.3} do_tfms["random_right_truncate"] = {"p":.3} do_tfms["random_replace_with_mask"] = {"p":.3, "mask_p":0.2} do_tfms["random_replace_with_pseudo"] = {"p":1.} do_tfms pseudo_inputs = QAInputGenerator.from_df(pseudo_df, tokenizer=tokenizer, q_col='target', id_col='ids', ans_col='predicted_answer') len(pseudo_inputs) train_ds = TSEDataset(train_inputs, tokenizer, is_test=False, do_tfms=do_tfms, pseudo_inputs=pseudo_inputs) test_ds = TSEDataset(test_inputs, tokenizer, is_test=True, do_tfms=None) do_tfms (input_ids, att_masks), (start_idx, end_idx) = train_ds[0] " ".join(tokenizer.convert_ids_to_tokens(input_ids[att_masks.bool()])) " ".join(tokenizer.convert_ids_to_tokens(input_ids[att_masks.bool()][start_idx:end_idx+1])) # ### `predict_answer_text` # TODO: Migrate to proper notebook # #export # def predict_answer_text(start_logits, end_logits, attention_mask, # context_text, char_to_word_offset, token_to_orig_map): # "Find best answer from context" # # find best start and end # context_start, context_end = min(token_to_orig_map), max(token_to_orig_map) # truncated_start_logits = start_logits[attention_mask.bool()][context_start:context_end+1] # truncated_end_logits = end_logits[attention_mask.bool()][context_start:context_end+1] # best_start_idx, best_end_idx = find_best_start_end_idxs(truncated_start_logits, truncated_end_logits) # # generate answer # tok_orig_char_start = token_to_orig_map[best_start_idx+context_start] # tok_orig_char_end = token_to_orig_map[best_end_idx+context_start] # return answer_from_orig_context(context_text, char_to_word_offset, tok_orig_char_start, tok_orig_char_end) # predict_answer_text(start_logits, end_logits, attention_mask, # context_text, char_to_word_offset, token_to_orig_map) ``` ### export ``` from nbdev.export import notebook2script notebook2script() ```
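Below is a minimal sketch of how `train_ds` and `test_ds` from the cells above might be batched. It assumes the tokenizer pads every encoding to the same `max_length` (192 here), so the default PyTorch collate can stack the tensors directly; the batch sizes are arbitrary choices.

```
from torch.utils.data import DataLoader

# assumes train_ds / test_ds built in the cells above
train_dl = DataLoader(train_ds, batch_size=32, shuffle=True)
test_dl = DataLoader(test_ds, batch_size=64, shuffle=False)

xb, yb = next(iter(train_dl))
input_ids, attention_mask = xb
start_positions, end_positions = yb
print(input_ids.shape, attention_mask.shape)        # e.g. torch.Size([32, 192]) each
print(start_positions.shape, end_positions.shape)   # torch.Size([32]) each
```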
github_jupyter
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. # Automated Machine Learning _**Prepare Data using `azureml.dataprep` for Local Execution**_ ## Contents 1. [Introduction](#Introduction) 1. [Setup](#Setup) 1. [Data](#Data) 1. [Train](#Train) 1. [Results](#Results) 1. [Test](#Test) ## Introduction In this example we showcase how you can use the `azureml.dataprep` SDK to load and prepare data for AutoML. `azureml.dataprep` can also be used standalone; full documentation can be found [here](https://github.com/Microsoft/PendletonDocs). Make sure you have executed the [configuration](../../../configuration.ipynb) before running this notebook. In this notebook you will learn how to: 1. Define data loading and preparation steps in a `Dataflow` using `azureml.dataprep`. 2. Pass the `Dataflow` to AutoML for a local run. 3. Pass the `Dataflow` to AutoML for a remote run. ## Setup Currently, Data Prep only supports __Ubuntu 16__ and __Red Hat Enterprise Linux 7__. We are working on supporting more linux distros. As part of the setup you have already created an Azure ML `Workspace` object. For AutoML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments. ``` import logging import pandas as pd import azureml.core from azureml.core.experiment import Experiment from azureml.core.workspace import Workspace import azureml.dataprep as dprep from azureml.train.automl import AutoMLConfig ws = Workspace.from_config() # choose a name for experiment experiment_name = 'automl-dataprep-local' # project folder project_folder = './sample_projects/automl-dataprep-local' experiment = Experiment(ws, experiment_name) output = {} output['SDK version'] = azureml.core.VERSION output['Subscription ID'] = ws.subscription_id output['Workspace Name'] = ws.name output['Resource Group'] = ws.resource_group output['Location'] = ws.location output['Project Directory'] = project_folder output['Experiment Name'] = experiment.name pd.set_option('display.max_colwidth', -1) outputDf = pd.DataFrame(data = output, index = ['']) outputDf.T ``` ## Data ``` # You can use `auto_read_file` which intelligently figures out delimiters and datatypes of a file. # The data referenced here was a 1MB simple random sample of the Chicago Crime data into a local temporary directory. # You can also use `read_csv` and `to_*` transformations to read (with overridable delimiter) # and convert column types manually. example_data = 'https://dprepdata.blob.core.windows.net/demo/crime0-random.csv' dflow = dprep.auto_read_file(example_data).skip(1) # Remove the header row. dflow.get_profile() # As `Primary Type` is our y data, we need to drop the values those are null in this column. dflow = dflow.drop_nulls('Primary Type') dflow.head(5) ``` ### Review the Data Preparation Result You can peek the result of a Dataflow at any range using `skip(i)` and `head(j)`. Doing so evaluates only `j` records for all the steps in the Dataflow, which makes it fast even against large datasets. `Dataflow` objects are immutable and are composed of a list of data preparation steps. A `Dataflow` object can be branched at any point for further usage. ``` X = dflow.drop_columns(columns=['Primary Type', 'FBI Code']) y = dflow.keep_columns(columns=['Primary Type'], validate_column_exists=True) ``` ## Train This creates a general AutoML settings object applicable for both local and remote runs. 
``` automl_settings = { "iteration_timeout_minutes" : 10, "iterations" : 2, "primary_metric" : 'AUC_weighted', "preprocess" : True, "verbosity" : logging.INFO } ``` ### Pass Data with `Dataflow` Objects The `Dataflow` objects captured above can be passed to the `submit` method for a local run. AutoML will retrieve the results from the `Dataflow` for model training. ``` automl_config = AutoMLConfig(task = 'classification', debug_log = 'automl_errors.log', X = X, y = y, **automl_settings) local_run = experiment.submit(automl_config, show_output = True) local_run ``` ## Results #### Widget for Monitoring Runs The widget will first report a "loading" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete. **Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details. ``` from azureml.widgets import RunDetails RunDetails(local_run).show() ``` #### Retrieve All Child Runs You can also use SDK methods to fetch all the child runs and see individual metrics that we log. ``` children = list(local_run.get_children()) metricslist = {} for run in children: properties = run.get_properties() metrics = {k: v for k, v in run.get_metrics().items() if isinstance(v, float)} metricslist[int(properties['iteration'])] = metrics rundata = pd.DataFrame(metricslist).sort_index(1) rundata ``` ### Retrieve the Best Model Below we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*. ``` best_run, fitted_model = local_run.get_output() print(best_run) print(fitted_model) ``` #### Best Model Based on Any Other Metric Show the run and the model that has the smallest `log_loss` value: ``` lookup_metric = "log_loss" best_run, fitted_model = local_run.get_output(metric = lookup_metric) print(best_run) print(fitted_model) ``` #### Model from a Specific Iteration Show the run and the model from the first iteration: ``` iteration = 0 best_run, fitted_model = local_run.get_output(iteration = iteration) print(best_run) print(fitted_model) ``` ## Test #### Load Test Data For the test data, it should have the same preparation step as the train data. Otherwise it might get failed at the preprocessing step. ``` dflow_test = dprep.auto_read_file(path='https://dprepdata.blob.core.windows.net/demo/crime0-test.csv').skip(1) dflow_test = dflow_test.drop_nulls('Primary Type') ``` #### Testing Our Best Fitted Model We will use confusion matrix to see how our model works. ``` from pandas_ml import ConfusionMatrix y_test = dflow_test.keep_columns(columns=['Primary Type']).to_pandas_dataframe() X_test = dflow_test.drop_columns(columns=['Primary Type', 'FBI Code']).to_pandas_dataframe() ypred = fitted_model.predict(X_test) cm = ConfusionMatrix(y_test['Primary Type'], ypred) print(cm) cm.plot() ```
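If `pandas_ml` is not available in your environment (it may not install cleanly alongside newer pandas releases), a rough equivalent of the confusion-matrix check can be done with scikit-learn, reusing `y_test` and `ypred` from the cells above. This is a sketch of an alternative, not part of the original walkthrough.
```
# scikit-learn fallback for the confusion matrix, reusing y_test and ypred from above
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay

labels = sorted(y_test['Primary Type'].unique())
cm = confusion_matrix(y_test['Primary Type'], ypred, labels=labels)
disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=labels)
disp.plot(xticks_rotation='vertical')
plt.show()
```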
github_jupyter
# Introduction to AlTar/Pyre applications ### 1. Introduction An AlTar application is based on the [pyre](https://github.com/pyre/pyre) framework. Compared with traditional Python programming, the `pyre` framework provides enhanced features for developing high performance scientific applications, including - It introduces a new programming model based on configurable components. A configurable component can be an attribute/parameter, or a method/protocol which may have different implementations. The latter will be especially helpful for users to swap between different algorithms/methods for a given procedure at runtime. - Configurable components also offer users an easy way to configure parameters and settings in an application. To pass parameters through command line (e.g, by `argparse`) and property `setter` is usually a formidable task for applications with a large parameter set. In pyre, this can be done by one `json`-type configuration file. - An AlTar/pyre application can deploy itself automatically to different computing platforms, such as a standalone computer, GPU workstations, computer clusters or clouds, with a simple change of the `shell` configuration, a configurable component. - Pyre also integrates high performance scientific libraries such as [GNU Scientific Library](https://www.gnu.org/software/gsl/) (for linear algebra and statistics), and [CUDA](https://developer.nvidia.com/cuda-downloads) (for GPU accelerated computing). It also offers an easy procedure for users to develop their own applications with mixed Python/C/C++/Fortran/CUDA programming, to achieve both high performance and user-friendly interfaces. In this tutorial, we will use a `Hello world!` application to demonstrate how an AlTar application, with configurable components, is constructed and runs slightly differently from conventional Python scripts. ### 2. The Hello application We create below an application to say "Hello" to someone (attribute `who`) several times (attribute `times`). ``` # import the altar module import altar # create an application based on altar.application class HelloApp(altar.application, family='altar.applications.hello'): """ A specialized AlTar application to say hello """ # user configurable components who = altar.properties.str(default='world') who.doc = "the person to say hello to" times = altar.properties.int(default=1) times.validators = altar.constraints.isPositive() times.doc = "how many times you want to say hello" # define methods def main(self): """ The main method """ for i in range(self.times): print(f"Hello {self.who}!") # all done return ``` The `HelloApp` application is derived from the `altar.application` base class in order to inherit various features offered by the pyre framework. It has two attributes, `who` and `times`, which are defined as configurable compnents. A component can be one of the basic Python data types, specified by altar.properties.[int, float, str, list, dict ...], or a user-defined component class. To run the HelloApp, we create an instance with a name='hello'. We pass the settings of `who` and `times` by a configuration file [hello.pfg](hello.pfg) (in default, the app instance searches for a `NAME.pfg` configuration file with `NAME` the same as the instance name): ``` ; application instance name hello: ; components configuration who = AlTar users ; no start/end quotes for strings are needed in pfg file times = 3 ``` In a `pfg` (pyre config) configuration file, indents are used to show the hierarchy of each configurable component. 
An alternative is to use the dot notation in Python, e.g., ``` ; an alternative way to write configurations hello.who = AlTar users hello.times = 3 ``` ``` # create a HelloApp instance with a name helloapp = HelloApp(name='hello') # when it is created, it searches for settings in hello.pfg to initialize configurable components # run the instance main method helloapp.run() ``` Once an instance is created(registered), all its components are processed to be regular Python objects which you may access/modify. ``` print(f"'{helloapp.who}' is going to be changed") helloapp.who='pyre users' helloapp.main() ``` You may also modify the [hello.pfg](hello.pfg) file for new configurations and re-run the program. Caveat: for jupyter/ipython, you may need to restart the kernel for new settings to be accepted. ### 3. Run HelloApp from command line AlTar/pyre applications are designed to run as regular shell applications, which offer more options to run with command line arguments. We create a [hello.py](hello.py) script to include the `HelloApp` class definition as well as to define a `__main__` method to create an instance and call `main()`. ``` # bootstrap if __name__ == "__main__": # build an instance of the default app app = HelloApp(name="hello") # invoke the main entry point status = app.main() # share raise SystemExit(status) ``` ``` # run hello app from a shell with cmdLine settings !python3 hello.py --who="World" --times=1 ``` By default, the app instance searches for the configuration file named `hello.pfg` as its name='hello'. It is also possible to use a different configuration file by a ``--config`` option. ``` ; hello2.pfg ; application instance name (still need to be the same as the instance name) hello: ; configurations who = pyre users times = 1 ``` ``` # run hello app with a specified configuration file !python3 hello.py --config=hello2.pfg # run hello app with both a configuration file and cmdLine settings # pfg file settings will be overriden by the cmdLine ones !python3 hello.py --config=hello2.pfg --times=2 ```
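To see how the same machinery scales to richer settings, here is a sketch of a variant application that greets a list of people. It assumes `altar.properties.list` can be used in the same way as the scalar properties shown above (the framework lists `list` and `dict` among its property types); check the pyre/altar reference for the exact schema arguments before relying on it.
```
import altar

class GreetApp(altar.application, family='altar.applications.greet'):
    """
    A sketch: greet several people, each entry configurable through the framework
    """
    # assumed usage of the list property; the exact signature may differ
    people = altar.properties.list(default=['world'])
    people.doc = "the people to greet"

    greeting = altar.properties.str(default='Hello')
    greeting.doc = "the greeting to use"

    def main(self):
        for person in self.people:
            print(f"{self.greeting} {person}!")
        return 0
```
The instance would be created as `GreetApp(name='greet')` and configured through a `greet.pfg` file, following the same naming convention as `hello.pfg` above.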
github_jupyter
# Hacking Into FasterRcnn in Pytorch
- toc: true
- badges: true
- comments: true
- categories: [jupyter]
- image: images/chart-preview.png

# Brief Intro
In this post I will show how to tweak some of the internals of FasterRcnn in Pytorch. I am assuming the reader is someone who has already trained an object detection model using pytorch. If not, there is an excellent tutorial on the [pytorch website](https://pytorch.org/tutorials/intermediate/torchvision_tutorial.html).

## Small Insight into the model
Basically Faster Rcnn is a two stage detector
1. The first stage is the Region Proposal Network, which is responsible for the objectness scores and the corresponding bounding boxes. So essentially the RegionProposalNetwork gives proposals of whether an object is there or not
2. These proposals are used by the RoIHeads, which outputs the detections.
   * Inside the RoIHeads roi align is done
   * There will be a box head and a box predictor
   * The losses for the predictions
3. In this post I will try to show how we can add custom parts to the torchvision FasterRcnn

```
#collapse-hide
import torch
import torchvision
from torchvision.models.detection import FasterRCNN
from torchvision.models.detection.rpn import AnchorGenerator
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
import torch.nn as nn
import torch.nn.functional as F

print(f'torch version {torch.__version__}')
print(f'torchvision version {torchvision.__version__}')
```

# Custom Backbone
1. The backbone can be without a FeaturePyramidNetwork
2. With a FeaturePyramidNetwork

## Custom Backbone without FPN
This is pretty well written in the pytorch tutorials section; I will add some comments to it additionally

```
backbone = torchvision.models.mobilenet_v2(pretrained=True).features
#we need to specify the out channels of this backbone explicitly because they will be
#used as the in channels for the RPNHead, which produces the output of the RegionProposalNetwork
#we can know the number of out channels by looking into the backbone with "backbone??"
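# (added aside) the channel count can also be confirmed programmatically instead of reading
# the source via "backbone??"; a dummy forward pass through the feature extractor shows that
# mobilenet_v2 ends with 1280 channels:
with torch.no_grad():
    print(backbone(torch.rand(1, 3, 224, 224)).shape[1])   # prints 1280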
backbone.out_channels = 1280 #by default the achor generator FasterRcnn assign will be for a FPN backone, so #we need to specify a different anchor generator anchor_generator = AnchorGenerator(sizes=((128, 256, 512),), aspect_ratios=((0.5, 1.0, 2.0),)) #here at each position in the grid there will be 3x3=9 anchors #and if our backbone is not FPN then the forward method will assign the name '0' to feature map #so we need to specify '0 as feature map name' roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'], output_size=9, sampling_ratio=2) #the output size is the output shape of the roi pooled features which will be used by the box head model = FasterRCNN(backbone,num_classes=2,rpn_anchor_generator=anchor_generator) model.eval() x = [torch.rand(3, 300, 400), torch.rand(3, 500, 600)] predictions = model(x) ``` ## Custom Backbone with FPN The Resnet50Fpn available in torchvision ``` # load a model pre-trained pre-trained on COCO model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True) # replace the classifier with a new one, that has # num_classes which is user-defined num_classes = 2 # 1 class (person) + background # get number of input features for the classifier in_features = model.roi_heads.box_predictor.cls_score.in_features # replace the pre-trained head with a new one model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes) model.eval() x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)] predictions = model(x) ``` ### Adding a different resenet backbone 1. Just change to a different resenet 1. Shows how we should change roi_pooler and anchor_generator along with the backbone changes if we are not using all the layers from FPN ### Using all layers from FPN ``` #hte returned layers are layer1,layer2,layer3,layer4 in returned_layers backbone = torchvision.models.detection.backbone_utils.resnet_fpn_backbone('resnet101',pretrained=True) model = FasterRCNN(backbone,num_classes=2) model.eval() x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)] predictions = model(x) ``` ### Using not all layers from FPN The size of the last fature map in a Resnet50.Later i will show the sizes of the feature maps we use when we use FPN. ``` #collapse-hide #just to show what will be out of of a normal resnet without fpn res = torchvision.models.resnet50() pure = nn.Sequential(*list(res.children())[:-2]) temp = torch.rand(1,3,400,400) pure(temp).shape ``` The required layers can be obtained by specifying the returned layers parameters.Also the resnet backbone of different depth can be used. ``` #the returned layers are layer1,layer2,layer3,layer4 in returned_layers backbone = torchvision.models.detection.backbone_utils.resnet_fpn_backbone('resnet101',pretrained=True, returned_layers=[2,3,4]) ``` Here we are using feature maps of the following shapes. ``` #collapse-hide out = backbone(temp) for i in out.keys(): print(i,' ',out[i].shape) #from the above we can see that the feature are feat maps should be 0,1,2,pool #where pool comes from the default extra block roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0','1','2','pool'], output_size=7, sampling_ratio=2) ``` So essentially what we did was we selected the last three layers in FPN by specifying them in the returned layers, by default, the backbone will add a pool layer on top of the last layer. So we are left with four layers. Now the RoIAlign need to be done in these four layers. 
If we dnt specify the RoIAlign it will use the by default assume we have used all layers from FPN in torchvision. So we need to specifically give the feauture maps that we used. The usage of feature maps can be our application specific, some time you might need to detect small objects sometimes the object of interest will be large objects only. ``` #we will need to give anchor_generator because the deafault anchor generator assumes we use all layers in fpn #since we have four layers in fpn here we need to specify 4 anchors anchor_sizes = ((32), (64), (128),(256) ) aspect_ratios = ((0.5,1.0, 1.5,2.0,)) * len(anchor_sizes) anchor_generator = AnchorGenerator(anchor_sizes, aspect_ratios) ``` Since we have four layers in our FPN we need to specify the anchors. So here each feature map will have 4 anchors at each position.So the first feature map will have anchor size 32 and four of them will be there at each position in the feature map of aspect_ratios (0.5,1.0, 1.5,2.0). Now we can pass these to the FasterRCNN class ``` model = FasterRCNN(backbone,num_classes=2,rpn_anchor_generator=anchor_generator,box_roi_pool=roi_pooler) model.eval() x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)] predictions = model(x) ``` # Custom Predictor The predictor is what that outputs the classes and the corresponding bboxes . By default these have two layers one for class and one for bboxes,but we can add more before it if we want to,so if you have a ton of data this might come handy,(remember there is already a box head before the predictor head, so you might not need this) ``` class Custom_predictor(nn.Module): def __init__(self,in_channels,num_classes): super(Custom_predictor,self).__init__() self.additional_layer = nn.Linear(in_channels,in_channels) #this is the additional layer self.cls_score = nn.Linear(in_channels, num_classes) self.bbox_pred = nn.Linear(in_channels, num_classes * 4) def forward(self,x): if x.dim() == 4: assert list(x.shape[2:]) == [1, 1] x = x.flatten(start_dim=1) x = self.additional_layer(x) scores = self.cls_score(x) bbox_deltas = self.bbox_pred(x) return scores, bbox_deltas model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True) #we need the out channels of the box head to pass tpp custom predictor in_features = model.roi_heads.box_head.fc7.out_features #now we can add the custom predictor to the model num_classes =2 model.roi_heads.box_predictor = Custom_predictor(in_features,num_classes) model.eval() x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)] predictions = model(x) ``` # Custom BoxHead The ouptuts of the roi_align are first passed through the box head before they are passed to the Predictor, there are two linear layers and we can customize them as we want, be careful with the dimensions since they can break the pipeline ``` model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True) class CustomHead(nn.Module): def __init__(self,in_channels,roi_outshape,representation_size): super(CustomHead,self).__init__() self.conv = nn.Conv2d(in_channels,in_channels,kernel_size=3,padding=1)#this is teh additional layer adde #we will be sending a flattened layer, the size will eb in_channels*w*h, here roi_outshape represents it self.fc6 = nn.Linear(in_channels*roi_outshape**2, representation_size) self.fc7 = nn.Linear(representation_size, representation_size) def forward(self,x): # breakpoint() x = self.conv(x) x = x.flatten(start_dim=1) import torch.nn.functional as F x = F.relu(self.fc6(x)) x = F.relu(self.fc7(x)) return x ``` 1. 
We need in_channels and representation size, remember the output of this is the input of box_predictor, so we can get the representation size of box_head from the input of box_predictor. 2. The in_channels can be got from the backbone out channels. 3. After the flattening the width and height also need to be considered which we wil get from roi_pool output. ``` in_channels = model.backbone.out_channels roi_outshape = model.roi_heads.box_roi_pool.output_size[0] representation_size=model.roi_heads.box_predictor.cls_score.in_features model.roi_heads.box_head = CustomHead(in_channels,roi_outshape,representation_size) num_classes=2 model.roi_heads.box_predictor = FastRCNNPredictor(representation_size, num_classes) model.eval() x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)] predictions = model(x) ``` # CustomLoss Function This is the modification for loss of FasterRcnn Predictor. 1. You can modify the loss by defining the fastrcnn_loss and making chages where you want. 2. Then pass as say model.roi_heads.fastrcnn_loss = Custom_loss 3. Usually we replace the F.crossentropy loss by say Focal loss or label smoothing loss ``` import torchvision.models.detection._utils as det_utils import torch.nn.functional as F ``` The below loss function is taken from [Aman Aroras blog](https://amaarora.github.io/2020/07/18/label-smoothing.html). ``` # Helper functions from fastai def reduce_loss(loss, reduction='mean'): return loss.mean() if reduction=='mean' else loss.sum() if reduction=='sum' else loss # Implementation from fastai https://github.com/fastai/fastai2/blob/master/fastai2/layers.py#L338 class LabelSmoothingCrossEntropy(nn.Module): def __init__(self, ε:float=0.1, reduction='mean'): super().__init__() self.ε,self.reduction = ε,reduction def forward(self, output, target): # number of classes c = output.size()[-1] log_preds = F.log_softmax(output, dim=-1) loss = reduce_loss(-log_preds.sum(dim=-1), self.reduction) nll = F.nll_loss(log_preds, target, reduction=self.reduction) # (1-ε)* H(q,p) + ε*H(u,p) return (1-self.ε)*nll + self.ε*(loss/c) custom_loss = LabelSmoothingCrossEntropy() #torchvision.models.detection.roi_heads.fastrcnn_loss?? def custom_fastrcnn_loss(class_logits, box_regression, labels, regression_targets): # type: (Tensor, Tensor, List[Tensor], List[Tensor]) -> Tuple[Tensor, Tensor] """ Computes the loss for Faster R-CNN. Arguments: class_logits (Tensor) box_regression (Tensor) labels (list[BoxList]) regression_targets (Tensor) Returns: classification_loss (Tensor) box_loss (Tensor) """ labels = torch.cat(labels, dim=0) regression_targets = torch.cat(regression_targets, dim=0) classification_loss = custom_loss(class_logits, labels) #ADDING THE CUSTOM LOSS HERE # get indices that correspond to the regression targets for # the corresponding ground truth labels, to be used with # advanced indexing sampled_pos_inds_subset = torch.where(labels > 0)[0] labels_pos = labels[sampled_pos_inds_subset] N, num_classes = class_logits.shape box_regression = box_regression.reshape(N, -1, 4) box_loss = det_utils.smooth_l1_loss( box_regression[sampled_pos_inds_subset, labels_pos], regression_targets[sampled_pos_inds_subset], beta=1 / 9, size_average=False, ) box_loss = box_loss / labels.numel() return classification_loss, box_loss ``` # Note on how to vary the anchor generator The way in which anchor generators are assigned when we use backbone with and without fpn is different. 
When we are not using FPN there will be only one feature map and for that feature map we need to specify anchors of different shapes. ``` anchor_generator = AnchorGenerator(sizes=((128, 256, 512),), aspect_ratios=((0.5, 1.0, 2.0),)) ``` In the above case suppose we have a feature map of shape 7x7, then at each cell in it there will be 9 anchors,three each of shapes 128,256 and 512,with the corresponding aspect rations. But when we are using FPN we have different feature maps, so its more effective we use different feature maps for different layers. Small sized objects are deteted using the earlier feature maps and thus for those we can specify a small sized anchor say 32 and for the later layers we can specify larger anchors. ``` anchor_sizes = ((32), (64), (128),(256) ) aspect_ratios = ((0.5,1.0, 1.5,2.0,)) * len(anchor_sizes) anchor_generator = AnchorGenerator(anchor_sizes, aspect_ratios) ``` In the above i am using the same aspect ratio for all the sizes so i am just multiplying by the lenght of the anchor_sizes, but if we want to specify different aspect ratios its totally possible. But be carefull to specifiy the same number of aspect ratios for each anchor sizes # Credits All the above hacks are just modification of the existing wonderful torchvision library.
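As a closing sanity check on the anchor discussion above (a small sketch added on top of the post's examples), `AnchorGenerator.num_anchors_per_location()` reports how many anchors are placed at each feature-map location, which makes the with-/without-FPN difference concrete.
```
from torchvision.models.detection.rpn import AnchorGenerator

# single feature map (no FPN): 3 sizes x 3 aspect ratios = 9 anchors per location
single_map_gen = AnchorGenerator(sizes=((128, 256, 512),),
                                 aspect_ratios=((0.5, 1.0, 2.0),))
print(single_map_gen.num_anchors_per_location())   # [9]

# four feature maps (FPN style): one size and four aspect ratios per map = 4 anchors per location
anchor_sizes = ((32,), (64,), (128,), (256,))
aspect_ratios = ((0.5, 1.0, 1.5, 2.0),) * len(anchor_sizes)
fpn_gen = AnchorGenerator(anchor_sizes, aspect_ratios)
print(fpn_gen.num_anchors_per_location())          # [4, 4, 4, 4]
```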
github_jupyter
<!-- dom:TITLE: PHY321: Harmonic Oscillations, Damping, Resonances and time-dependent Forces --> # PHY321: Harmonic Oscillations, Damping, Resonances and time-dependent Forces <!-- dom:AUTHOR: [Morten Hjorth-Jensen](http://mhjgit.github.io/info/doc/web/) at Department of Physics and Astronomy and Facility for Rare Ion Beams (FRIB), Michigan State University, USA & Department of Physics, University of Oslo, Norway --> <!-- Author: --> **[Morten Hjorth-Jensen](http://mhjgit.github.io/info/doc/web/)**, Department of Physics and Astronomy and Facility for Rare Ion Beams (FRIB), Michigan State University, USA and Department of Physics, University of Oslo, Norway Date: **Mar 1, 2021** Copyright 1999-2021, [Morten Hjorth-Jensen](http://mhjgit.github.io/info/doc/web/). Released under CC Attribution-NonCommercial 4.0 license ## Aims and Overarching Motivation ### Monday Damped oscillations. Analytical and numerical solutions **Reading suggestion**: Taylor sections 5.4-5.5. ### Wednesday No lecture, study day ### Friday Driven oscillations and resonances with examples. **Reading suggestion**: Taylor sections 5.5-5.6. ## Damped Oscillators We consider only the case where the damping force is proportional to the velocity. This is counter to dragging friction, where the force is proportional in strength to the normal force and independent of velocity, and is also inconsistent with wind resistance, where the magnitude of the drag force is proportional the square of the velocity. Rolling resistance does seem to be mainly proportional to the velocity. However, the main motivation for considering damping forces proportional to the velocity is that the math is more friendly. This is because the differential equation is linear, i.e. each term is of order $x$, $\dot{x}$, $\ddot{x}\cdots$, or even terms with no mention of $x$, and there are no terms such as $x^2$ or $x\ddot{x}$. The equations of motion for a spring with damping force $-b\dot{x}$ are <!-- Equation labels as ordinary links --> <div id="_auto1"></div> $$ \begin{equation} m\ddot{x}+b\dot{x}+kx=0. \label{_auto1} \tag{1} \end{equation} $$ ## Harmonic Oscillator, Damping Just to make the solution a bit less messy, we rewrite this equation as <!-- Equation labels as ordinary links --> <div id="eq:dampeddiffyq"></div> $$ \begin{equation} \label{eq:dampeddiffyq} \tag{2} \ddot{x}+2\beta\dot{x}+\omega_0^2x=0,~~~~\beta\equiv b/2m,~\omega_0\equiv\sqrt{k/m}. \end{equation} $$ Both $\beta$ and $\omega$ have dimensions of inverse time. To find solutions (see appendix C in the text) you must make an educated guess at the form of the solution. To do this, first realize that the solution will need an arbitrary normalization $A$ because the equation is linear. Secondly, realize that if the form is <!-- Equation labels as ordinary links --> <div id="_auto2"></div> $$ \begin{equation} x=Ae^{rt} \label{_auto2} \tag{3} \end{equation} $$ that each derivative simply brings out an extra power of $r$. This means that the $Ae^{rt}$ factors out and one can simply solve for an equation for $r$. Plugging this form into Eq. ([2](#eq:dampeddiffyq)), <!-- Equation labels as ordinary links --> <div id="_auto3"></div> $$ \begin{equation} r^2+2\beta r+\omega_0^2=0. \label{_auto3} \tag{4} \end{equation} $$ ## Harmonic Oscillator, Solutions of Damped Motion Because this is a quadratic equation there will be two solutions, <!-- Equation labels as ordinary links --> <div id="_auto4"></div> $$ \begin{equation} r=-\beta\pm\sqrt{\beta^2-\omega_0^2}. 
\label{_auto4} \tag{5} \end{equation} $$ We refer to the two solutions as $r_1$ and $r_2$ corresponding to the $+$ and $-$ roots. As expected, there should be two arbitrary constants involved in the solution, <!-- Equation labels as ordinary links --> <div id="_auto5"></div> $$ \begin{equation} x=A_1e^{r_1t}+A_2e^{r_2t}, \label{_auto5} \tag{6} \end{equation} $$ where the coefficients $A_1$ and $A_2$ are determined by initial conditions. The roots listed above, $\sqrt{\omega_0^2-\beta_0^2}$, will be imaginary if the damping is small and $\beta<\omega_0$. In that case, $r$ is complex and the factor $\exp{(rt)}$ will have some oscillatory behavior. If the roots are real, there will only be exponentially decaying solutions. There are three cases: ## Underdamped: $\beta<\omega_0$ $$ \begin{eqnarray} x&=&A_1e^{-\beta t}e^{i\omega't}+A_2e^{-\beta t}e^{-i\omega't},~~\omega'\equiv\sqrt{\omega_0^2-\beta^2}\\ \nonumber &=&(A_1+A_2)e^{-\beta t}\cos\omega't+i(A_1-A_2)e^{-\beta t}\sin\omega't. \end{eqnarray} $$ Here we have made use of the identity $e^{i\omega't}=\cos\omega't+i\sin\omega't$. Because the constants are arbitrary, and because the real and imaginary parts are both solutions individually, we can simply consider the real part of the solution alone: <!-- Equation labels as ordinary links --> <div id="eq:homogsolution"></div> $$ \begin{eqnarray} \label{eq:homogsolution} \tag{7} x&=&B_1e^{-\beta t}\cos\omega't+B_2e^{-\beta t}\sin\omega't,\\ \nonumber \omega'&\equiv&\sqrt{\omega_0^2-\beta^2}. \end{eqnarray} $$ ## Critical dampling: $\beta=\omega_0$ In this case the two terms involving $r_1$ and $r_2$ are identical because $\omega'=0$. Because we need to arbitrary constants, there needs to be another solution. This is found by simply guessing, or by taking the limit of $\omega'\rightarrow 0$ from the underdamped solution. The solution is then <!-- Equation labels as ordinary links --> <div id="eq:criticallydamped"></div> $$ \begin{equation} \label{eq:criticallydamped} \tag{8} x=Ae^{-\beta t}+Bte^{-\beta t}. \end{equation} $$ The critically damped solution is interesting because the solution approaches zero quickly, but does not oscillate. For a problem with zero initial velocity, the solution never crosses zero. This is a good choice for designing shock absorbers or swinging doors. ## Overdamped: $\beta>\omega_0$ $$ \begin{eqnarray} x&=&A_1\exp{-(\beta+\sqrt{\beta^2-\omega_0^2})t}+A_2\exp{-(\beta-\sqrt{\beta^2-\omega_0^2})t} \end{eqnarray} $$ This solution will also never pass the origin more than once, and then only if the initial velocity is strong and initially toward zero. Given $b$, $m$ and $\omega_0$, find $x(t)$ for a particle whose initial position is $x=0$ and has initial velocity $v_0$ (assuming an underdamped solution). The solution is of the form, $$ \begin{eqnarray*} x&=&e^{-\beta t}\left[A_1\cos(\omega' t)+A_2\sin\omega't\right],\\ \dot{x}&=&-\beta x+\omega'e^{-\beta t}\left[-A_1\sin\omega't+A_2\cos\omega't\right].\\ \omega'&\equiv&\sqrt{\omega_0^2-\beta^2},~~~\beta\equiv b/2m. \end{eqnarray*} $$ From the initial conditions, $A_1=0$ because $x(0)=0$ and $\omega'A_2=v_0$. So $$ x=\frac{v_0}{\omega'}e^{-\beta t}\sin\omega't. $$ ## Harmonic Oscillator, Solutions Consider a single solution with no arbitrary constants, which we will call a **particular solution**, $x_p(t)$. It should be emphasized that this is **A** particular solution, because there exists an infinite number of such solutions because the general solution should have two arbitrary constants. 
Now consider solutions to the same equation without the driving term, which include two arbitrary constants. These are called either **homogenous solutions** or **complementary solutions**, and were given in the previous section, e.g. Eq. ([7](#eq:homogsolution)) for the underdamped case. The homogenous solution already incorporates the two arbitrary constants, so any sum of a homogenous solution and a particular solution will represent the **general solution** of the equation. The general solution incorporates the two arbitrary constants $A$ and $B$ to accommodate the two initial conditions. One could have picked a different particular solution, i.e. the original particular solution plus any homogenous solution with the arbitrary constants $A_p$ and $B_p$ chosen at will. When one adds in the homogenous solution, which has adjustable constants with arbitrary constants $A'$ and $B'$, to the new particular solution, one can get the same general solution by simply adjusting the new constants such that $A'+A_p=A$ and $B'+B_p=B$. Thus, the choice of $A_p$ and $B_p$ are irrelevant, and when choosing the particular solution it is best to make the simplest choice possible. ## Harmonic Oscillator, Particular Solution To find a particular solution, one first guesses at the form, <!-- Equation labels as ordinary links --> <div id="eq:partform"></div> $$ \begin{equation} \label{eq:partform} \tag{9} x_p(t)=D\cos(\omega t-\delta), \end{equation} $$ and rewrite the differential equation as <!-- Equation labels as ordinary links --> <div id="_auto6"></div> $$ \begin{equation} D\left\{-\omega^2\cos(\omega t-\delta)-2\beta\omega\sin(\omega t-\delta)+\omega_0^2\cos(\omega t-\delta)\right\}=\frac{F_0}{m}\cos(\omega t). \label{_auto6} \tag{10} \end{equation} $$ One can now use angle addition formulas to get $$ \begin{eqnarray} D\left\{(-\omega^2\cos\delta+2\beta\omega\sin\delta+\omega_0^2\cos\delta)\cos(\omega t)\right.&&\\ \nonumber \left.+(-\omega^2\sin\delta-2\beta\omega\cos\delta+\omega_0^2\sin\delta)\sin(\omega t)\right\} &=&\frac{F_0}{m}\cos(\omega t). \end{eqnarray} $$ Both the $\cos$ and $\sin$ terms need to equate if the expression is to hold at all times. Thus, this becomes two equations $$ \begin{eqnarray} D\left\{-\omega^2\cos\delta+2\beta\omega\sin\delta+\omega_0^2\cos\delta\right\}&=&\frac{F_0}{m}\\ \nonumber -\omega^2\sin\delta-2\beta\omega\cos\delta+\omega_0^2\sin\delta&=&0. \end{eqnarray} $$ After dividing by $\cos\delta$, the lower expression leads to <!-- Equation labels as ordinary links --> <div id="_auto7"></div> $$ \begin{equation} \tan\delta=\frac{2\beta\omega}{\omega_0^2-\omega^2}. \label{_auto7} \tag{11} \end{equation} $$ ## Solving with Driven Oscillations Using the identities $\tan^2+1=\csc^2$ and $\sin^2+\cos^2=1$, one can also express $\sin\delta$ and $\cos\delta$, $$ \begin{eqnarray} \sin\delta&=&\frac{2\beta\omega}{\sqrt{(\omega_0^2-\omega^2)^2+4\omega^2\beta^2}},\\ \nonumber \cos\delta&=&\frac{(\omega_0^2-\omega^2)}{\sqrt{(\omega_0^2-\omega^2)^2+4\omega^2\beta^2}} \end{eqnarray} $$ Inserting the expressions for $\cos\delta$ and $\sin\delta$ into the expression for $D$, <!-- Equation labels as ordinary links --> <div id="eq:Ddrive"></div> $$ \begin{equation} \label{eq:Ddrive} \tag{12} D=\frac{F_0/m}{\sqrt{(\omega_0^2-\omega^2)^2+4\omega^2\beta^2}}. \end{equation} $$ For a given initial condition, e.g. initial displacement and velocity, one must add the homogenous solution then solve for the two arbitrary constants. 
However, because the homogenous solutions decay with time as $e^{-\beta t}$, the particular solution is all that remains at large times, and is therefore the steady state solution. Because the arbitrary constants are all in the homogenous solution, all memory of the initial conditions are lost at large times, $t>>1/\beta$. The amplitude of the motion, $D$, is linearly proportional to the driving force ($F_0/m$), but also depends on the driving frequency $\omega$. For small $\beta$ the maximum will occur at $\omega=\omega_0$. This is referred to as a resonance. In the limit $\beta\rightarrow 0$ the amplitude at resonance approaches infinity. ## Alternative Derivation for Driven Oscillators Here, we derive the same expressions as in Equations ([9](#eq:partform)) and ([12](#eq:Ddrive)) but express the driving forces as $$ \begin{eqnarray} F(t)&=&F_0e^{i\omega t}, \end{eqnarray} $$ rather than as $F_0\cos\omega t$. The real part of $F$ is the same as before. For the differential equation, <!-- Equation labels as ordinary links --> <div id="eq:compdrive"></div> $$ \begin{eqnarray} \label{eq:compdrive} \tag{13} \ddot{x}+2\beta\dot{x}+\omega_0^2x&=&\frac{F_0}{m}e^{i\omega t}, \end{eqnarray} $$ one can treat $x(t)$ as an imaginary function. Because the operations $d^2/dt^2$ and $d/dt$ are real and thus do not mix the real and imaginary parts of $x(t)$, Eq. ([13](#eq:compdrive)) is effectively 2 equations. Because $e^{\omega t}=\cos\omega t+i\sin\omega t$, the real part of the solution for $x(t)$ gives the solution for a driving force $F_0\cos\omega t$, and the imaginary part of $x$ corresponds to the case where the driving force is $F_0\sin\omega t$. It is rather easy to solve for the complex $x$ in this case, and by taking the real part of the solution, one finds the answer for the $\cos\omega t$ driving force. We assume a simple form for the particular solution <!-- Equation labels as ordinary links --> <div id="_auto8"></div> $$ \begin{equation} x_p=De^{i\omega t}, \label{_auto8} \tag{14} \end{equation} $$ where $D$ is a complex constant. From Eq. ([13](#eq:compdrive)) one inserts the form for $x_p$ above to get $$ \begin{eqnarray} D\left\{-\omega^2+2i\beta\omega+\omega_0^2\right\}e^{i\omega t}=(F_0/m)e^{i\omega t},\\ \nonumber D=\frac{F_0/m}{(\omega_0^2-\omega^2)+2i\beta\omega}. \end{eqnarray} $$ The norm and phase for $D=|D|e^{-i\delta}$ can be read by inspection, <!-- Equation labels as ordinary links --> <div id="_auto9"></div> $$ \begin{equation} |D|=\frac{F_0/m}{\sqrt{(\omega_0^2-\omega^2)^2+4\beta^2\omega^2}},~~~~\tan\delta=\frac{2\beta\omega}{\omega_0^2-\omega^2}. \label{_auto9} \tag{15} \end{equation} $$ This is the same expression for $\delta$ as before. One then finds $x_p(t)$, <!-- Equation labels as ordinary links --> <div id="eq:fastdriven1"></div> $$ \begin{eqnarray} \label{eq:fastdriven1} \tag{16} x_p(t)&=&\Re\frac{(F_0/m)e^{i\omega t-i\delta}}{\sqrt{(\omega_0^2-\omega^2)^2+4\beta^2\omega^2}}\\ \nonumber &=&\frac{(F_0/m)\cos(\omega t-\delta)}{\sqrt{(\omega_0^2-\omega^2)^2+4\beta^2\omega^2}}. \end{eqnarray} $$ This is the same answer as before. If one wished to solve for the case where $F(t)= F_0\sin\omega t$, the imaginary part of the solution would work <!-- Equation labels as ordinary links --> <div id="eq:fastdriven2"></div> $$ \begin{eqnarray} \label{eq:fastdriven2} \tag{17} x_p(t)&=&\Im\frac{(F_0/m)e^{i\omega t-i\delta}}{\sqrt{(\omega_0^2-\omega^2)^2+4\beta^2\omega^2}}\\ \nonumber &=&\frac{(F_0/m)\sin(\omega t-\delta)}{\sqrt{(\omega_0^2-\omega^2)^2+4\beta^2\omega^2}}. 
\end{eqnarray} $$ ## Damped and Driven Oscillator Consider the damped and driven harmonic oscillator worked out above. Given $F_0, m,\beta$ and $\omega_0$, solve for the complete solution $x(t)$ for the case where $F=F_0\sin\omega t$ with initial conditions $x(t=0)=0$ and $v(t=0)=0$. Assume the underdamped case. The general solution including the arbitrary constants includes both the homogenous and particular solutions, $$ \begin{eqnarray*} x(t)&=&\frac{F_0}{m}\frac{\sin(\omega t-\delta)}{\sqrt{(\omega_0^2-\omega^2)^2+4\beta^2\omega^2}} +A\cos\omega't e^{-\beta t}+B\sin\omega't e^{-\beta t}. \end{eqnarray*} $$ The quantities $\delta$ and $\omega'$ are given earlier in the section, $\omega'=\sqrt{\omega_0^2-\beta^2}, \delta=\tan^{-1}(2\beta\omega/(\omega_0^2-\omega^2)$. Here, solving the problem means finding the arbitrary constants $A$ and $B$. Satisfying the initial conditions for the initial position and velocity: $$ \begin{eqnarray*} x(t=0)=0&=&-\eta\sin\delta+A,\\ v(t=0)=0&=&\omega\eta\cos\delta-\beta A+\omega'B,\\ \eta&\equiv&\frac{F_0}{m}\frac{1}{\sqrt{(\omega_0^2-\omega^2)^2+4\beta^2\omega^2}}. \end{eqnarray*} $$ The problem is now reduced to 2 equations and 2 unknowns, $A$ and $B$. The solution is $$ \begin{eqnarray} A&=& \eta\sin\delta ,~~~B=\frac{-\omega\eta\cos\delta+\beta\eta\sin\delta}{\omega'}. \end{eqnarray} $$ ## Resonance Widths; the $Q$ factor From the previous two sections, the particular solution for a driving force, $F=F_0\cos\omega t$, is $$ \begin{eqnarray} x_p(t)&=&\frac{F_0/m}{\sqrt{(\omega_0^2-\omega^2)^2+4\omega^2\beta^2}}\cos(\omega_t-\delta),\\ \nonumber \delta&=&\tan^{-1}\left(\frac{2\beta\omega}{\omega_0^2-\omega^2}\right). \end{eqnarray} $$ If one fixes the driving frequency $\omega$ and adjusts the fundamental frequency $\omega_0=\sqrt{k/m}$, the maximum amplitude occurs when $\omega_0=\omega$ because that is when the term from the denominator $(\omega_0^2-\omega^2)^2+4\omega^2\beta^2$ is at a minimum. This is akin to dialing into a radio station. However, if one fixes $\omega_0$ and adjusts the driving frequency one minimize with respect to $\omega$, e.g. set <!-- Equation labels as ordinary links --> <div id="_auto10"></div> $$ \begin{equation} \frac{d}{d\omega}\left[(\omega_0^2-\omega^2)^2+4\omega^2\beta^2\right]=0, \label{_auto10} \tag{18} \end{equation} $$ and one finds that the maximum amplitude occurs when $\omega=\sqrt{\omega_0^2-2\beta^2}$. If $\beta$ is small relative to $\omega_0$, one can simply state that the maximum amplitude is <!-- Equation labels as ordinary links --> <div id="_auto11"></div> $$ \begin{equation} x_{\rm max}\approx\frac{F_0}{2m\beta \omega_0}. \label{_auto11} \tag{19} \end{equation} $$ $$ \begin{eqnarray} \frac{4\omega^2\beta^2}{(\omega_0^2-\omega^2)^2+4\omega^2\beta^2}=\frac{1}{2}. \end{eqnarray} $$ For small damping this occurs when $\omega=\omega_0\pm \beta$, so the $FWHM\approx 2\beta$. For the purposes of tuning to a specific frequency, one wants the width to be as small as possible. The ratio of $\omega_0$ to $FWHM$ is known as the _quality_factor, or $Q$ factor, <!-- Equation labels as ordinary links --> <div id="_auto12"></div> $$ \begin{equation} Q\equiv \frac{\omega_0}{2\beta}. \label{_auto12} \tag{20} \end{equation} $$ ## Numerical Studies of Driven Oscillations Solving the problem of driven oscillations numerically gives us much more flexibility to study different types of driving forces. We can reuse our earlier code by simply adding a driving force. 
If we stay in the $x$-direction only this can be easily done by adding a term $F_{\mathrm{ext}}(x,t)$. Note that we have kept it rather general here, allowing for both a spatial and a temporal dependence. Before we dive into the code, we need to briefly remind ourselves about the equations we started with for the case with damping, namely $$ m\frac{d^2x}{dt^2} + b\frac{dx}{dt}+kx(t) =0, $$ with no external force applied to the system. Let us now for simplicty assume that our external force is given by $$ F_{\mathrm{ext}}(t) = F_0\cos{(\omega t)}, $$ where $F_0$ is a constant (what is its dimension?) and $\omega$ is the frequency of the applied external driving force. **Small question:** would you expect energy to be conserved now? Introducing the external force into our lovely differential equation and dividing by $m$ and introducing $\omega_0^2=\sqrt{k/m}$ we have $$ \frac{d^2x}{dt^2} + \frac{b}{m}\frac{dx}{dt}+\omega_0^2x(t) =\frac{F_0}{m}\cos{(\omega t)}, $$ Thereafter we introduce a dimensionless time $\tau = t\omega_0$ and a dimensionless frequency $\tilde{\omega}=\omega/\omega_0$. We have then $$ \frac{d^2x}{d\tau^2} + \frac{b}{m\omega_0}\frac{dx}{d\tau}+x(\tau) =\frac{F_0}{m\omega_0^2}\cos{(\tilde{\omega}\tau)}, $$ Introducing a new amplitude $\tilde{F} =F_0/(m\omega_0^2)$ (check dimensionality again) we have $$ \frac{d^2x}{d\tau^2} + \frac{b}{m\omega_0}\frac{dx}{d\tau}+x(\tau) =\tilde{F}\cos{(\tilde{\omega}\tau)}. $$ Our final step, as we did in the case of various types of damping, is to define $\gamma = b/(2m\omega_0)$ and rewrite our equations as $$ \frac{d^2x}{d\tau^2} + 2\gamma\frac{dx}{d\tau}+x(\tau) =\tilde{F}\cos{(\tilde{\omega}\tau)}. $$ This is the equation we will code below using the Euler-Cromer method. ``` DeltaT = 0.001 #set up arrays tfinal = 20 # in years n = ceil(tfinal/DeltaT) # set up arrays for t, v, and x t = np.zeros(n) v = np.zeros(n) x = np.zeros(n) # Initial conditions as one-dimensional arrays of time x0 = 1.0 v0 = 0.0 x[0] = x0 v[0] = v0 gamma = 0.2 Omegatilde = 0.5 Ftilde = 1.0 # Start integrating using Euler-Cromer's method for i in range(n-1): # Set up the acceleration # Here you could have defined your own function for this a = -2*gamma*v[i]-x[i]+Ftilde*cos(t[i]*Omegatilde) # update velocity, time and position v[i+1] = v[i] + DeltaT*a x[i+1] = x[i] + DeltaT*v[i+1] t[i+1] = t[i] + DeltaT # Plot position as function of time fig, ax = plt.subplots() ax.set_ylabel('x[m]') ax.set_xlabel('t[s]') ax.plot(t, x) fig.tight_layout() save_fig("ForcedBlockEulerCromer") plt.show() ``` In the above example we have focused on the Euler-Cromer method. This method has a local truncation error which is proportional to $\Delta t^2$ and thereby a global error which is proportional to $\Delta t$. We can improve this by using the Runge-Kutta family of methods. The widely popular Runge-Kutta to fourth order or just **RK4** has indeed a much better truncation error. The RK4 method has a global error which is proportional to $\Delta t$. Let us revisit this method and see how we can implement it for the above example. ## Differential Equations, Runge-Kutta methods Runge-Kutta (RK) methods are based on Taylor expansion formulae, but yield in general better algorithms for solutions of an ordinary differential equation. The basic philosophy is that it provides an intermediate step in the computation of $y_{i+1}$. 
To see this, consider first the following definitions

<!-- Equation labels as ordinary links -->
<div id="_auto13"></div>

$$
\begin{equation}
\frac{dy}{dt}=f(t,y),
\label{_auto13} \tag{21}
\end{equation}
$$

and

<!-- Equation labels as ordinary links -->
<div id="_auto14"></div>

$$
\begin{equation}
y(t)=\int f(t,y) dt,
\label{_auto14} \tag{22}
\end{equation}
$$

and

<!-- Equation labels as ordinary links -->
<div id="_auto15"></div>

$$
\begin{equation}
y_{i+1}=y_i+ \int_{t_i}^{t_{i+1}} f(t,y) dt.
\label{_auto15} \tag{23}
\end{equation}
$$

To demonstrate the philosophy behind RK methods, let us consider the second-order RK method, RK2. The first approximation consists in Taylor expanding $f(t,y)$ around the center of the integration interval $t_i$ to $t_{i+1}$, that is, at $t_i+h/2$, $h$ being the step. Using the midpoint formula for an integral, defining $y(t_i+h/2) = y_{i+1/2}$ and $t_i+h/2 = t_{i+1/2}$, we obtain

<!-- Equation labels as ordinary links -->
<div id="_auto16"></div>

$$
\begin{equation}
\int_{t_i}^{t_{i+1}} f(t,y) dt \approx hf(t_{i+1/2},y_{i+1/2}) +O(h^3).
\label{_auto16} \tag{24}
\end{equation}
$$

This means in turn that we have

<!-- Equation labels as ordinary links -->
<div id="_auto17"></div>

$$
\begin{equation}
y_{i+1}=y_i + hf(t_{i+1/2},y_{i+1/2}) +O(h^3).
\label{_auto17} \tag{25}
\end{equation}
$$

However, we do not know the value of $y_{i+1/2}$. The next approximation is thus to use Euler's method to approximate $y_{i+1/2}$. We have then

<!-- Equation labels as ordinary links -->
<div id="_auto18"></div>

$$
\begin{equation}
y_{(i+1/2)}=y_i + \frac{h}{2}\frac{dy}{dt}=y(t_i) + \frac{h}{2}f(t_i,y_i).
\label{_auto18} \tag{26}
\end{equation}
$$

This means that we can define the following algorithm for the second-order Runge-Kutta method, RK2,

<!-- Equation labels as ordinary links -->
<div id="_auto19"></div>

$$
\begin{equation}
k_1=hf(t_i,y_i),
\label{_auto19} \tag{27}
\end{equation}
$$

<!-- Equation labels as ordinary links -->
<div id="_auto20"></div>

$$
\begin{equation}
k_2=hf(t_{i+1/2},y_i+k_1/2),
\label{_auto20} \tag{28}
\end{equation}
$$

with the final value

<!-- Equation labels as ordinary links -->
<div id="_auto21"></div>

$$
\begin{equation}
y_{i+1}\approx y_i + k_2 +O(h^3).
\label{_auto21} \tag{29}
\end{equation}
$$

The difference from the previous one-step methods is that we now need an intermediate step in our evaluation, namely at $t_i+h/2 = t_{(i+1/2)}$ where we evaluate the derivative $f$. This involves more operations, but the gain is a better stability in the solution.

The fourth-order Runge-Kutta, RK4, has the following algorithm

$$
k_1=hf(t_i,y_i)\hspace{0.5cm} k_2=hf(t_i+h/2,y_i+k_1/2)
$$

$$
k_3=hf(t_i+h/2,y_i+k_2/2)\hspace{0.5cm} k_4=hf(t_i+h,y_i+k_3)
$$

with the final result

$$
y_{i+1}=y_i +\frac{1}{6}\left( k_1 +2k_2+2k_3+k_4\right).
$$

Thus, the algorithm consists in first calculating $k_1$ with $t_i$, $y_i$ and $f$ as inputs. Thereafter, we increase the step size by $h/2$ and calculate $k_2$, then $k_3$ and finally $k_4$. The global error goes as $O(h^4)$.

However, at this stage, if we keep adding different methods in our main program, the code will quickly become messy and ugly. Before we proceed, we will now introduce functions that embody the various methods for solving differential equations. This means that we can separate out these methods in their own functions and files (and later as classes and more generic functions) and simply call them when needed. Similarly, we could easily encapsulate various forces or other quantities of interest in terms of functions.
To see this, let us bring up the code we developed above for the simple sliding block, but now only with the simple forward Euler method. We introduce two functions, one for the simple Euler method and one for the force. Note that here the forward Euler method does not know the specific force function to be called. It receives just an input the name. We can easily change the force by adding another function. ``` def ForwardEuler(v,x,t,n,Force): for i in range(n-1): v[i+1] = v[i] + DeltaT*Force(v[i],x[i],t[i]) x[i+1] = x[i] + DeltaT*v[i] t[i+1] = t[i] + DeltaT def SpringForce(v,x,t): # note here that we have divided by mass and we return the acceleration return -2*gamma*v-x+Ftilde*cos(t*Omegatilde) ``` It is easy to add a new method like the Euler-Cromer ``` def ForwardEulerCromer(v,x,t,n,Force): for i in range(n-1): a = Force(v[i],x[i],t[i]) v[i+1] = v[i] + DeltaT*a x[i+1] = x[i] + DeltaT*v[i+1] t[i+1] = t[i] + DeltaT ``` and the Velocity Verlet method (be careful with time-dependence here, it is not an ideal method for non-conservative forces)) ``` def VelocityVerlet(v,x,t,n,Force): for i in range(n-1): a = Force(v[i],x[i],t[i]) x[i+1] = x[i] + DeltaT*v[i]+0.5*a anew = Force(v[i],x[i+1],t[i+1]) v[i+1] = v[i] + 0.5*DeltaT*(a+anew) t[i+1] = t[i] + DeltaT ``` Finally, we can now add the Runge-Kutta2 method via a new function ``` def RK2(v,x,t,n,Force): for i in range(n-1): # Setting up k1 k1x = DeltaT*v[i] k1v = DeltaT*Force(v[i],x[i],t[i]) # Setting up k2 vv = v[i]+k1v*0.5 xx = x[i]+k1x*0.5 k2x = DeltaT*vv k2v = DeltaT*Force(vv,xx,t[i]+DeltaT*0.5) # Final result x[i+1] = x[i]+k2x v[i+1] = v[i]+k2v t[i+1] = t[i]+DeltaT ``` Finally, we can now add the Runge-Kutta2 method via a new function ``` def RK4(v,x,t,n,Force): for i in range(n-1): # Setting up k1 k1x = DeltaT*v[i] k1v = DeltaT*Force(v[i],x[i],t[i]) # Setting up k2 vv = v[i]+k1v*0.5 xx = x[i]+k1x*0.5 k2x = DeltaT*vv k2v = DeltaT*Force(vv,xx,t[i]+DeltaT*0.5) # Setting up k3 vv = v[i]+k2v*0.5 xx = x[i]+k2x*0.5 k3x = DeltaT*vv k3v = DeltaT*Force(vv,xx,t[i]+DeltaT*0.5) # Setting up k4 vv = v[i]+k3v xx = x[i]+k3x k4x = DeltaT*vv k4v = DeltaT*Force(vv,xx,t[i]+DeltaT) # Final result x[i+1] = x[i]+(k1x+2*k2x+2*k3x+k4x)/6. v[i+1] = v[i]+(k1v+2*k2v+2*k3v+k4v)/6. t[i+1] = t[i] + DeltaT ``` The Runge-Kutta family of methods are particularly useful when we have a time-dependent acceleration. If we have forces which depend only the spatial degrees of freedom (no velocity and/or time-dependence), then energy conserving methods like the Velocity Verlet or the Euler-Cromer method are preferred. As soon as we introduce an explicit time-dependence and/or add dissipitave forces like friction or air resistance, then methods like the family of Runge-Kutta methods are well suited for this. The code below uses the Runge-Kutta4 methods. 
``` DeltaT = 0.001 #set up arrays tfinal = 20 # in years n = ceil(tfinal/DeltaT) # set up arrays for t, v, and x t = np.zeros(n) v = np.zeros(n) x = np.zeros(n) # Initial conditions (can change to more than one dim) x0 = 1.0 v0 = 0.0 x[0] = x0 v[0] = v0 gamma = 0.2 Omegatilde = 0.5 Ftilde = 1.0 # Start integrating using Euler's method # Note that we define the force function as a SpringForce RK4(v,x,t,n,SpringForce) # Plot position as function of time fig, ax = plt.subplots() ax.set_ylabel('x[m]') ax.set_xlabel('t[s]') ax.plot(t, x) fig.tight_layout() save_fig("ForcedBlockRK4") plt.show() ``` <!-- !split --> ## Principle of Superposition and Periodic Forces (Fourier Transforms) If one has several driving forces, $F(t)=\sum_n F_n(t)$, one can find the particular solution to each $F_n$, $x_{pn}(t)$, and the particular solution for the entire driving force is <!-- Equation labels as ordinary links --> <div id="_auto22"></div> $$ \begin{equation} x_p(t)=\sum_nx_{pn}(t). \label{_auto22} \tag{30} \end{equation} $$ This is known as the principal of superposition. It only applies when the homogenous equation is linear. If there were an anharmonic term such as $x^3$ in the homogenous equation, then when one summed various solutions, $x=(\sum_n x_n)^2$, one would get cross terms. Superposition is especially useful when $F(t)$ can be written as a sum of sinusoidal terms, because the solutions for each sinusoidal (sine or cosine) term is analytic, as we saw above. Driving forces are often periodic, even when they are not sinusoidal. Periodicity implies that for some time $\tau$ $$ \begin{eqnarray} F(t+\tau)=F(t). \end{eqnarray} $$ One example of a non-sinusoidal periodic force is a square wave. Many components in electric circuits are non-linear, e.g. diodes, which makes many wave forms non-sinusoidal even when the circuits are being driven by purely sinusoidal sources. The code here shows a typical example of such a square wave generated using the functionality included in the **scipy** Python package. We have used a period of $\tau=0.2$. ``` %matplotlib inline import numpy as np import math from scipy import signal import matplotlib.pyplot as plt # number of points n = 500 # start and final times t0 = 0.0 tn = 1.0 # Period t = np.linspace(t0, tn, n, endpoint=False) SqrSignal = np.zeros(n) SqrSignal = 1.0+signal.square(2*np.pi*5*t) plt.plot(t, SqrSignal) plt.ylim(-0.5, 2.5) plt.show() ``` For the sinusoidal example studied in the previous subsections the period is $\tau=2\pi/\omega$. However, higher harmonics can also satisfy the periodicity requirement. In general, any force that satisfies the periodicity requirement can be expressed as a sum over harmonics, <!-- Equation labels as ordinary links --> <div id="_auto23"></div> $$ \begin{equation} F(t)=\frac{f_0}{2}+\sum_{n>0} f_n\cos(2n\pi t/\tau)+g_n\sin(2n\pi t/\tau). \label{_auto23} \tag{31} \end{equation} $$ From the previous subsection, one can write down the answer for $x_{pn}(t)$, by substituting $f_n/m$ or $g_n/m$ for $F_0/m$ into Eq.s ([16](#eq:fastdriven1)) or ([17](#eq:fastdriven2)) respectively. By writing each factor $2n\pi t/\tau$ as $n\omega t$, with $\omega\equiv 2\pi/\tau$, <!-- Equation labels as ordinary links --> <div id="eq:fourierdef1"></div> $$ \begin{equation} \label{eq:fourierdef1} \tag{32} F(t)=\frac{f_0}{2}+\sum_{n>0}f_n\cos(n\omega t)+g_n\sin(n\omega t). 
\end{equation} $$ The solutions for $x(t)$ then come from replacing $\omega$ with $n\omega$ for each term in the particular solution in Equations ([9](#eq:partform)) and ([12](#eq:Ddrive)), $$ \begin{eqnarray} x_p(t)&=&\frac{f_0}{2k}+\sum_{n>0} \alpha_n\cos(n\omega t-\delta_n)+\beta_n\sin(n\omega t-\delta_n),\\ \nonumber \alpha_n&=&\frac{f_n/m}{\sqrt{((n\omega)^2-\omega_0^2)+4\beta^2n^2\omega^2}},\\ \nonumber \beta_n&=&\frac{g_n/m}{\sqrt{((n\omega)^2-\omega_0^2)+4\beta^2n^2\omega^2}},\\ \nonumber \delta_n&=&\tan^{-1}\left(\frac{2\beta n\omega}{\omega_0^2-n^2\omega^2}\right). \end{eqnarray} $$ Because the forces have been applied for a long time, any non-zero damping eliminates the homogenous parts of the solution, so one need only consider the particular solution for each $n$. The problem will considered solved if one can find expressions for the coefficients $f_n$ and $g_n$, even though the solutions are expressed as an infinite sum. The coefficients can be extracted from the function $F(t)$ by <!-- Equation labels as ordinary links --> <div id="eq:fourierdef2"></div> $$ \begin{eqnarray} \label{eq:fourierdef2} \tag{33} f_n&=&\frac{2}{\tau}\int_{-\tau/2}^{\tau/2} dt~F(t)\cos(2n\pi t/\tau),\\ \nonumber g_n&=&\frac{2}{\tau}\int_{-\tau/2}^{\tau/2} dt~F(t)\sin(2n\pi t/\tau). \end{eqnarray} $$ To check the consistency of these expressions and to verify Eq. ([33](#eq:fourierdef2)), one can insert the expansion of $F(t)$ in Eq. ([32](#eq:fourierdef1)) into the expression for the coefficients in Eq. ([33](#eq:fourierdef2)) and see whether $$ \begin{eqnarray} f_n&=?&\frac{2}{\tau}\int_{-\tau/2}^{\tau/2} dt~\left\{ \frac{f_0}{2}+\sum_{m>0}f_m\cos(m\omega t)+g_m\sin(m\omega t) \right\}\cos(n\omega t). \end{eqnarray} $$ Immediately, one can throw away all the terms with $g_m$ because they convolute an even and an odd function. The term with $f_0/2$ disappears because $\cos(n\omega t)$ is equally positive and negative over the interval and will integrate to zero. For all the terms $f_m\cos(m\omega t)$ appearing in the sum, one can use angle addition formulas to see that $\cos(m\omega t)\cos(n\omega t)=(1/2)(\cos[(m+n)\omega t]+\cos[(m-n)\omega t]$. This will integrate to zero unless $m=n$. In that case the $m=n$ term gives <!-- Equation labels as ordinary links --> <div id="_auto24"></div> $$ \begin{equation} \int_{-\tau/2}^{\tau/2}dt~\cos^2(m\omega t)=\frac{\tau}{2}, \label{_auto24} \tag{34} \end{equation} $$ and $$ \begin{eqnarray} f_n&=?&\frac{2}{\tau}\int_{-\tau/2}^{\tau/2} dt~f_n/2\\ \nonumber &=&f_n~\checkmark. \end{eqnarray} $$ The same method can be used to check for the consistency of $g_n$. Consider the driving force: <!-- Equation labels as ordinary links --> <div id="_auto25"></div> $$ \begin{equation} F(t)=At/\tau,~~-\tau/2<t<\tau/2,~~~F(t+\tau)=F(t). \label{_auto25} \tag{35} \end{equation} $$ Find the Fourier coefficients $f_n$ and $g_n$ for all $n$ using Eq. ([33](#eq:fourierdef2)). Only the odd coefficients enter by symmetry, i.e. $f_n=0$. One can find $g_n$ integrating by parts, <!-- Equation labels as ordinary links --> <div id="eq:fouriersolution"></div> $$ \begin{eqnarray} \label{eq:fouriersolution} \tag{36} g_n&=&\frac{2}{\tau}\int_{-\tau/2}^{\tau/2}dt~\sin(n\omega t) \frac{At}{\tau}\\ \nonumber u&=&t,~dv=\sin(n\omega t)dt,~v=-\cos(n\omega t)/(n\omega),\\ \nonumber g_n&=&\frac{-2A}{n\omega \tau^2}\int_{-\tau/2}^{\tau/2}dt~\cos(n\omega t) +\left.2A\frac{-t\cos(n\omega t)}{n\omega\tau^2}\right|_{-\tau/2}^{\tau/2}. 
\end{eqnarray} $$ The first term is zero because $\cos(n\omega t)$ will be equally positive and negative over the interval. Using the fact that $\omega\tau=2\pi$, $$ \begin{eqnarray} g_n&=&-\frac{2A}{2n\pi}\cos(n\omega\tau/2)\\ \nonumber &=&-\frac{A}{n\pi}\cos(n\pi)\\ \nonumber &=&\frac{A}{n\pi}(-1)^{n+1}. \end{eqnarray} $$ ## Fourier Series More text will come here, chpater 5.7-5.8 of Taylor are discussed during the lectures. The code here uses the Fourier series discussed in chapter 5.7 for a square wave signal. The equations for the coefficients are are discussed in Taylor section 5.7, see Example 5.4. The code here visualizes the various approximations given by Fourier series compared with a square wave with period $T=0.2$, witth $0.1$ and max value $F=2$. We see that when we increase the number of components in the Fourier series, the Fourier series approximation gets closes and closes to the square wave signal. ``` import numpy as np import math from scipy import signal import matplotlib.pyplot as plt # number of points n = 500 # start and final times t0 = 0.0 tn = 1.0 # Period T =0.2 # Max value of square signal Fmax= 2.0 # Width of signal Width = 0.1 t = np.linspace(t0, tn, n, endpoint=False) SqrSignal = np.zeros(n) FourierSeriesSignal = np.zeros(n) SqrSignal = 1.0+signal.square(2*np.pi*5*t+np.pi*Width/T) a0 = Fmax*Width/T FourierSeriesSignal = a0 Factor = 2.0*Fmax/np.pi for i in range(1,500): FourierSeriesSignal += Factor/(i)*np.sin(np.pi*i*Width/T)*np.cos(i*t*2*np.pi/T) plt.plot(t, SqrSignal) plt.plot(t, FourierSeriesSignal) plt.ylim(-0.5, 2.5) plt.show() ```
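As a quick numerical cross-check of the sawtooth coefficients derived in Eq. ([36](#eq:fouriersolution)), the small sketch below evaluates the integrals of Eq. ([33](#eq:fourierdef2)) with the trapezoidal rule and compares the result with $g_n=A(-1)^{n+1}/(n\pi)$; the $f_n$ should all vanish by symmetry. The values of $A$ and $\tau$ are arbitrary choices for the check.
```
import numpy as np

# numerical check of Eq. (33) against the analytical result in Eq. (36) for F(t)=At/tau
A, tau = 1.0, 2.0
omega = 2*np.pi/tau
t = np.linspace(-tau/2, tau/2, 200001)
F = A*t/tau

for n in range(1, 5):
    g_n = 2.0/tau*np.trapz(F*np.sin(n*omega*t), t)
    f_n = 2.0/tau*np.trapz(F*np.cos(n*omega*t), t)
    print(n, g_n, A*(-1)**(n+1)/(n*np.pi), f_n)
```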
github_jupyter
# Data Processing The project has five steps: - delet irregular (too large or small (no data)) and non-image data - remove duplicate image - remove irrelevant image - split dataset: create classes.txt, train.txt, test.txt - rename images ### Deleting irragular images ``` import os import sys import imghdr class ImageDelet(): def __init__(self): self.path = '/home/gpu/Project/dataProcess/bun/' self.imageTypes = ['.jpg', '.jpeg', '.png', '.gif'] delet_count = 0 def delet(self): filelist = os.listdir(self.path) total_num = len(filelist) delet_count = 0 for item in filelist: src = os.path.join(os.path.abspath(self.path), item) image_type = os.path.splitext(src)[-1] if not imghdr.what(src): os.remove(src) # delet corrupted image delet_count += 1 elif image_type in self.imageTypes: imageSize = sys.getsizeof(src) # most abnormal image's getsizeof will exceed 150 # print(imageSize) if imageSize > 150: os.remove(src) delet_count += 1 else: continue else: os.remove(src) # delet non-image data delet_count += 1 print ('Total: %d\nDelet: %d' % (total_num, delet_count)) deletImage = ImageDelet() deletImage.delet() ``` ### Renaming the images downloaded by the web crawler. ### Renaming the images which have been processed. ``` class ImageRename(): def __init__(self): self.path = '/home/gpu/Project/dataProcess/bun/' def rename(self): filelist = os.listdir(self.path) filelist.sort() # if the filelist is not sorted, some file will be replaced when repeating rename result in total_num = len(filelist) rename_count = 0 for item in filelist: src = os.path.join(os.path.abspath(self.path), item) image_type = os.path.splitext(src)[-1] # if image_type in self.imageTypes: # dst = os.path.join(os.path.abspath(self.path), format(str(rename_count), '0>4s') + '.jpg') dst = os.path.join(os.path.abspath(self.path), str(rename_count).zfill(4) + image_type) os.rename(src, dst) print ('converting %s to %s ...' % (src, dst)) rename_count += 1 # elif os.path.isdir(src): # continue # else: # os.remove(src) # delet_count += 1 print ('Total: %d\nRename: %d' % (total_num, rename_count)) newName = ImageRename() newName.rename() ``` ### Removing the duplicate images ``` # Perceptual Hash Algorithm - dHash import cv2 def dhash(image): # convert image to 8*8 image = cv2.resize(image, (9, 8), interpolation=cv2.INTER_CUBIC) # convert image to grayscale gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) dhash_str = '' for i in range(8): for j in range(8): if gray[i, j] > gray[i, j + 1]: dhash_str = dhash_str + '1' else: dhash_str = dhash_str + '0' result = '' for i in range(0, 64, 4): result += ''.join('%x' % int(dhash_str[i: i + 4], 2)) return result # calculate the difference between hash1 and hash2 def campHash(hash1, hash2): n = 0 # If the hash length is different, the comparison cannot be made, and -1 is returned. if len(hash1) != len(hash2): return -1 # If the hash length is same, traversing hash1 ahd hash2 for comparison. for i in range(len(hash1)): if hash1[i] != hash2[i]: n = n + 1 return n image1 = cv2.imread('/home/gpu/Project/dataProcess/bun/0017.jpg') image2 = cv2.imread('/home/gpu/Project/dataProcess/bun/0018.jpeg') hash1 = dhash(image1) hash2 = dhash(image2) distance_hash = campHash(hash1, hash2) # if campHash == 0, it means that the two images are duplicate images. 
image2_path = '/home/gpu/Project/dataProcess/bun/0012.jpeg' if distance_hash == 0: os.remove(image2_path) ``` ### Removing the irrelevant images ``` # Perceptual Hash Algorithm - dHash import cv2 def dhash(image): # convert image to 8*8 image = cv2.resize(image, (9, 8), interpolation=cv2.INTER_CUBIC) # convert image to grayscale gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) dhash_str = '' for i in range(8): for j in range(8): if gray[i, j] > gray[i, j + 1]: dhash_str = dhash_str + '1' else: dhash_str = dhash_str + '0' result = '' for i in range(0, 64, 4): result += ''.join('%x' % int(dhash_str[i: i + 4], 2)) return result # calculate the difference between hash1 and hash2 def campHash(hash1, hash2): n = 0 # If the hash length is different, the comparison cannot be made, and -1 is returned. if len(hash1) != len(hash2): return -1 # If the hash length is same, traversing hash1 ahd hash2 for comparison. for i in range(len(hash1)): if hash1[i] != hash2[i]: n = n + 1 return n image1 = cv2.imread('/home/gpu/Project/dataProcess/bun/0017.jpg') image2 = cv2.imread('/home/gpu/Project/dataProcess/bun/0013.jpeg') hash1 = dhash(image1) hash2 = dhash(image2) distance_hash = campHash(hash1, hash2) # if campHash > 10, it means that the two images are different classes. image2_path = '/home/gpu/Project/dataProcess/bun/0012.jpeg' if distance_hash > 10: os.remove(image2_path) ``` ### Spliting dataset #### Generate the train.txt, test.txt, and classes.txt. ``` dataset_path = '/home/gpu/Project/dataProcess/' def gengrateClass(dataset_path): filelist = os.listdir(dataset_path) for file_name in filelist: if file_name.startswith('.'): filelist.remove(file_name) filelist.sort() class_savePath = '/home/gpu/Project/dataProcess/meta/class.txt' # If filename does not exist, it will be created automatically. #'w' means to write data. The original data in the file will be cleared before writing! with open(class_savePath,'w') as f: for file_name in filelist: f.write(file_name) f.write('\n') gengrateClass(dataset_path) import math def splitDataset(dataset_path): filelist = os.listdir(dataset_path) for file_name in filelist: if file_name.startswith('.'): filelist.remove(file_name) filelist.sort() train_savePath = '/home/gpu/Project/dataProcess/meta/train.txt' test_savePath = '/home/gpu/Project/dataProcess/meta/test.txt' for file_name in filelist: image_path = dataset_path + file_name image_list = os.listdir(image_path) for image_name in image_list: if image_name.startswith('.'): image_list.remove(image_name) image_size = len(image_list) train_size = math.ceil(image_size * 0.75) # If filename does not exist, it will be created automatically. #'w' means to append data. The original data in the file will not be cleared. with open(train_savePath,'a') as train: for file_name in image_list[:train_size]: train.write(file_name) train.write('\n') with open(test_savePath,'a') as test: for file_name in image_list[train_size:]: test.write(file_name) test.write('\n') splitDataset(dataset_path) ```
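As a closing sketch, note that the duplicate- and irrelevant-image cells above compare only two hard-coded files. The loop below (not part of the original notebook; the folder path is a placeholder) shows how the same dHash comparison could be applied across a whole folder, keeping a running list of hashes and deleting any image that matches one already kept.

```
import os
import cv2

def remove_duplicates(folder, max_distance=0):
    """Delete images whose dHash is within max_distance of an already kept image."""
    kept_hashes = []
    removed = 0
    for name in sorted(os.listdir(folder)):
        path = os.path.join(folder, name)
        image = cv2.imread(path)
        if image is None:      # unreadable / non-image file, skip it here
            continue
        h = dhash(image)       # dhash() and campHash() are defined in the cells above
        if any(0 <= campHash(h, k) <= max_distance for k in kept_hashes):
            os.remove(path)    # duplicate (or near-duplicate) of an earlier image
            removed += 1
        else:
            kept_hashes.append(h)
    print('Removed %d duplicate images' % removed)

# remove_duplicates('/home/gpu/Project/dataProcess/bun/')   # example call, path is a placeholder
```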
github_jupyter
``` import pymc3 as pm import matplotlib.pyplot as plt import pandas as pd import numpy as np import theano.tensor as tt import theano %load_ext autoreload %autoreload 2 %matplotlib inline %config InlineBackend.figure_format = 'retina' df = pd.read_csv('../datasets/bikes/hour.csv') df feature_cols = ['workingday', 'holiday', 'temp', 'atemp', 'hum', 'windspeed'] out_col = ['cnt'] df[out_col] X = pm.floatX(df[feature_cols]) Y = pm.floatX(df[out_col].apply(np.log10)) n_hidden = X.shape[1] with pm.Model() as nn_model: w1 = pm.Normal('w1', mu=0, sd=1, shape=(X.shape[1], n_hidden)) w2 = pm.Normal('w2', mu=0, sd=1, shape=(n_hidden, 1)) b1 = pm.Normal('b1', mu=0, sd=1, shape=(n_hidden,)) b2 = pm.Normal('b2', mu=0, sd=1, shape=(1,)) a1 = pm.Deterministic('a1', tt.nnet.relu(tt.dot(X, w1) + b1)) a2 = pm.Deterministic('a2', tt.dot(a1, w2) + b2) output = pm.Normal('likelihood', mu=a2, observed=Y) with pm.Model() as three_layer_model: w1 = pm.Normal('w1', mu=0, sd=1, shape=(X.shape[1], n_hidden)) w2 = pm.Normal('w2', mu=0, sd=1, shape=(n_hidden, n_hidden)) w3 = pm.Normal('w3', mu=0, sd=1, shape=(n_hidden, 1)) b1 = pm.Normal('b1', mu=0, sd=1, shape=(n_hidden,)) b2 = pm.Normal('b2', mu=0, sd=1, shape=(n_hidden,)) b3 = pm.Normal('b3', mu=0, sd=1, shape=(1,)) a1 = pm.Deterministic('a1', tt.nnet.relu(tt.dot(X, w1) + b1)) a2 = pm.Deterministic('a2', tt.nnet.relu(tt.dot(a1, w2) + b2)) a3 = pm.Deterministic('a3', tt.dot(a2, w3) + b3) sd = pm.HalfCauchy('sd', beta=1) output = pm.Normal('likelihood', mu=a3, sd=sd, observed=Y) with pm.Model() as linreg_model: w1 = pm.Normal('w1', mu=0, sd=1, shape=(X.shape[1], 1)) b1 = pm.Normal('b1', mu=0, sd=1, shape=(1,)) a1 = pm.Deterministic('a1', tt.dot(X, w1) + b1) sd = pm.HalfCauchy('sd', beta=1) output = pm.Normal('likelihood', mu=a1, sd=sd, observed=Y) with linreg_model: s = theano.shared(pm.floatX(1.1)) inference = pm.ADVI(cost_part_grad_scale=s, learning_rate=.01) approx = pm.fit(200000, method=inference) plt.plot(inference.hist) with linreg_model: trace = approx.sample(2000) pm.traceplot(trace, varnames=['w1', 'b1']) with linreg_model: samps = pm.sample_ppc(trace) samps['likelihood'].std(axis=0) samps['likelihood'].mean(axis=0) from sklearn.metrics import mean_squared_error as mse mse(Y, samps['likelihood'].mean(axis=0)) plt.scatter(samps['likelihood'].mean(axis=0).squeeze(), Y.values) ```
github_jupyter
Parametric non Parametric inference =================== Suppose you have a physical model of an output variable, which takes the form of a parametric model. You now want to model the random effects of the data by a non-parametric (better: infinite parametric) model, such as a Gaussian Process as described in [BayesianLinearRegression](../background/BayesianLinearRegression.ipynb). We can do inference in both worlds, the parameteric and infinite parametric one, by extending the features to a mix between \begin{align} p(\mathbf{y}|\boldsymbol{\Phi}, \alpha, \sigma) &= \int p(\mathbf{y}|\boldsymbol{\Phi}, \mathbf{w}, \sigma)p(\mathbf{w}|\alpha) \,\mathrm{d}\mathbf{w}\\ &= \langle\mathcal{N}(\mathbf{y}|\boldsymbol{\Phi}\mathbf{w}, \sigma^2\mathbf{I})\rangle_{\mathcal{N}(\mathbf{0}, \alpha\mathbf{I})}\\ &= \mathcal{N}(\mathbf{y}|\mathbf{0}, \alpha\boldsymbol{\Phi}\boldsymbol{\Phi}^\top + \sigma^2\mathbf{I}) \end{align} Thus, we can maximize this marginal likelihood w.r.t. the hyperparameters $\alpha, \sigma$ by log transforming and maximizing: \begin{align} \hat\alpha, \hat\sigma = \mathop{\arg\max}_{\alpha, \sigma}\log p(\mathbf{y}|\boldsymbol{\Phi}, \alpha, \sigma) \end{align} So we will define a mixed inference model mixing parametric and non-parametric models together. One part is described by a paramtric feature space mapping $\boldsymbol{\Phi}\mathbf{w}$ and the other part is a non-parametric function $\mathbf{f}_\text{n}$. For this we define the underlying function $\mathbf{f}$ as $$ \begin{align} p(\mathbf{f}) &= p\left( \underbrace{ \begin{bmatrix} \delta(t-T)\\ \boldsymbol{\Phi} \end{bmatrix} }_{=:\mathbf{A}} \left. \begin{bmatrix} \mathbf{f}_{\text{n}}\\ \mathbf{w} \end{bmatrix} \right| \mathbf{0}, \mathbf{A} \underbrace{ \begin{bmatrix} \mathbf{K}_{\mathbf{f}} & \\ & \mathbf{K}_{\mathbf{w}} \end{bmatrix} }_{=:\boldsymbol{\Sigma}} \mathbf{A}^\top \right)\enspace, \end{align} $$ where $\mathbf{K}_{\mathbf{f}}$ is the covariance describing the non-parametric part $\mathbf{f}_\text{n}\sim\mathcal{N}(\mathbf{0}, \mathbf{K}_\mathbf{f})$ and $\mathbf{K}_{\mathbf{w}}$ is the covariance of the prior over $\mathbf{w}\sim\mathcal{N}(\mathbf{w}|\mathbf{0}, \mathbf{K}_{\mathbf{w}})$. Thus we can now predict the different parts and even the paramters $\mathbf{w}$ themselves using (Note: If someone is willing to write down the proper path to this, here a welcome and thank you very much. Thanks to Philipp Hennig for his ideas in this.) 
$$ \begin{align} p(\mathbf{f}|\mathbf{y}) &= \mathcal{N}(\mathbf{f} | \boldsymbol{\Sigma}\mathbf{A}^\top \underbrace{ (\mathbf{A}\boldsymbol{\Sigma}\mathbf{A}^\top + \sigma^2\mathbf{I})^{-1}}_{=:\mathbf{K}^{-1}}\mathbf{y}, \boldsymbol{\Sigma}-\boldsymbol{\Sigma}\mathbf{A}^\top\mathbf{K}^{-1}\mathbf{A}\boldsymbol{\Sigma}) \\ p(\mathbf{w}|\mathbf{y}) &= \mathcal{N}(\mathbf{w} | \mathbf{K}_\mathbf{w}\boldsymbol{\Phi}^\top\mathbf{K}^{-1}\mathbf{y}, \mathbf{K}_{\mathbf{w}}-\mathbf{K}_{\mathbf{w}}\boldsymbol{\Phi}^\top\mathbf{K}^{-1}\boldsymbol{\Phi}\mathbf{K}_{\mathbf{w}})) \\ p(\mathbf{f}_\text{n}|\mathbf{y}) &= \mathcal{N}(\mathbf{f}_\text{n}| \mathbf{K}_\mathbf{f}\mathbf{K}^{-1}\mathbf{y}, \mathbf{K}_{\mathbf{f}}-\mathbf{K}_{\mathbf{f}}\mathbf{K}^{-1}\mathbf{K}_{\mathbf{f}})) \end{align} $$ ``` import GPy, numpy as np, pandas as pd from GPy.kern import LinearSlopeBasisFuncKernel, DomainKernel, ChangePointBasisFuncKernel %matplotlib inline from matplotlib import pyplot as plt ``` We will create some data with a non-linear function, strongly driven by piecewise linear trends: ``` np.random.seed(12345) x = np.random.uniform(0, 10, 40)[:,None] x.sort(0) starts, stops = np.arange(0, 10, 3), np.arange(1, 11, 3) k_lin = LinearSlopeBasisFuncKernel(1, starts, stops, variance=1., ARD=1) Phi = k_lin.phi(x) _ = plt.plot(x, Phi) ``` We will assume the prior over $w_i\sim\mathcal{N}(0, 3)$ and a Matern32 structure in the non-parametric part. Additionally, we add a half parametric part, which is a periodic effect only active between $x\in[3,8]$: ``` k = GPy.kern.Matern32(1, .3) Kf = k.K(x) k_per = GPy.kern.PeriodicMatern32(1, variance=100, period=1) k_per.period.fix() k_dom = DomainKernel(1, 1., 5.) k_perdom = k_per * k_dom Kpd = k_perdom.K(x) np.random.seed(1234) alpha = np.random.gamma(3, 1, Phi.shape[1]) w = np.random.normal(0, alpha)[:,None] f_SE = np.random.multivariate_normal(np.zeros(x.shape[0]), Kf)[:, None] f_perdom = np.random.multivariate_normal(np.zeros(x.shape[0]), Kpd)[:, None] f_w = Phi.dot(w) f = f_SE + f_w + f_perdom y = f + np.random.normal(0, .1, f.shape) plt.plot(x, f_w) _ = plt.plot(x, y) # Make sure the function is driven by the linear trend, as there can be a difficulty in identifiability. ``` With this data, we can fit a model using the basis functions as paramtric part. If you want to implement your own basis function kernel, see GPy.kern._src.basis_funcs.BasisFuncKernel and implement the necessary parts. Usually it is enough to implement the phi(X) method, returning the higher dimensional mapping of inputs X. 
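As a hedged illustration of that last point (this class is not part of the original notebook, and the exact constructor arguments of `BasisFuncKernel` may differ between GPy versions, so check the GPy source referenced above), a custom basis function kernel could look roughly like this:

```
import numpy as np
from GPy.kern._src.basis_funcs import BasisFuncKernel

class QuadraticBasisFuncKernel(BasisFuncKernel):
    """Hypothetical parametric part with phi(x) = [x, x^2]."""
    def phi(self, X):
        # map the 1-d inputs to a two-column feature matrix [x, x^2]
        return np.hstack([X, X**2])

# Assuming the base class accepts (input_dim, variance, ARD, ...) in the same way the
# LinearSlopeBasisFuncKernel call above suggests:
# k_quad = QuadraticBasisFuncKernel(1, variance=1., ARD=True, name='quadratic')
# m = GPy.models.GPRegression(x, y, GPy.kern.Matern32(1, .3) + k_quad)
```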
``` k = (GPy.kern.Bias(1) + GPy.kern.Matern52(1) + LinearSlopeBasisFuncKernel(1, ARD=1, start=starts, stop=stops, variance=.1, name='linear_slopes') + k_perdom.copy() ) k.randomize() m = GPy.models.GPRegression(x, y, k) m.checkgrad() m.optimize() m.plot() x_pred = np.linspace(0, 10, 500)[:,None] pred_SE, var_SE = m._raw_predict(x_pred, kern=m.kern.Mat52) pred_per, var_per = m._raw_predict(x_pred, kern=m.kern.mul) pred_bias, var_bias = m._raw_predict(x_pred, kern=m.kern.bias) pred_lin, var_lin = m._raw_predict(x_pred, kern=m.kern.linear_slopes) m.plot_f(resolution=500, predict_kw=dict(kern=m.kern.Mat52), plot_data=False) plt.plot(x, f_SE) m.plot_f(resolution=500, predict_kw=dict(kern=m.kern.mul), plot_data=False) plt.plot(x, f_perdom) m.plot_f(resolution=500, predict_kw=dict(kern=m.kern.linear_slopes), plot_data=False) plt.plot(x, f_w) w_pred, w_var = m.kern.linear_slopes.posterior_inf() df = pd.DataFrame(w, columns=['truth'], index=np.arange(Phi.shape[1])) df['mean'] = w_pred df['std'] = np.sqrt(w_var.diagonal()) np.round(df, 2) ```
github_jupyter
# CST PTM Data Overview The PTM data from CST has a significant amount of missing data and requires special consideration when normalizing. The starting data is ratio-level-data - where log2 ratios have been calculated from the cancerous cell lines compared to the non-cancerous 'Normal Pool' data from within the 'plex'. This data is under the lung_cellline_3_1_16 directory and each PTM type has its own '_combined_ratios.tsv' file. This notebook will overview the ratio-level datat from the PTM types: phosphorylation, methylation, and acetylation. The figures in this notebook demonstrate that there is a systematic difference in the distributions of PTM measurements in the lung cancer cell lines regardless of PTMs with missing data are considered. The normalization procedures used to correct for this systematic bias are discussed in the [CST_PTM_Normalization_Overview](https://github.com/MaayanLab/CST_Lung_Cancer_Viz/blob/master/CST_PTM_Normalization_Overview.ipynb) notebook. The systematic difference in average PTM ratios in the cell lines could be due to a number of factors: * it could be biological in nature, e.g. some cell line have uniformly higher PTM levels than others * some cell lines might have higher/lower metabolism rates which will result in differences in incorporation of heavy isotopes * some cell lines might reproduce faster/slower during the time period where cells are exposed to heavy isotopes, which would result in differences in the population size of the different cell lines In any case, it can be useful towards understanding the differences in cell line behavior to remove this systematic difference. # Phosphorylation Data I'll start by having a look at the phosphorylation data that can be found in `lung_cellline_3_1_16/lung_cellline_phospho/lung_cellline_TMT_phospho_combined_ratios.tsv` This file was made using the `process_latest_cst_data.py` script. First I'll make the necessary imports. ``` # imports and plotting defaults import pandas as pd import numpy as np import matplotlib.pyplot as plt %matplotlib inline import matplotlib matplotlib.style.use('ggplot') from copy import deepcopy # use clustergrammer module to load/process (source code in clustergrammer directory) from clustergrammer import Network ``` Next, I'll load the phosphorylation ratio data and simplify the column names (to improve readability) ``` # load data data and export as pandas dataframe: inst_df def load_data(filename): ''' load data using clustergrammer and export as pandas dataframe ''' net = deepcopy(Network()) net.load_file(filename) tmp_df = net.dat_to_df() inst_df = tmp_df['mat'] # simplify column names (remove categories) col_names = inst_df.columns.tolist() # simple_col_names = [] # for inst_name in col_names: # simple_col_names.append(inst_name[0]) inst_df.columns = col_names print(inst_df.shape) ini_rows = inst_df.index.tolist() unique_rows = list(set(ini_rows)) if len(ini_rows) > len(unique_rows): print('found duplicate PTMs') else: print('did not find duplicate PTMs') return inst_df filename = '../lung_cellline_3_1_16/lung_cellline_phospho/' + \ 'lung_cellline_TMT_phospho_combined_ratios.tsv' inst_df = load_data(filename) ``` I loaded the phosphorylation tsv file using clustergrammer and exported it as a pandas dataframe. We can see that there are 5,798 unique phosphorylation sites measured in all 45 lung cancer cell lines. ### Missing Phosphorylation Data However, there is also a large amount of missing data, e.g. no cell line has all 5798 phosphorylations mesaured. 
We can plot the number of measured phosphorylation sites (e.g. non-NaN values in the dataframe) below to get a sense of the amount of missing data ``` inst_df.count().sort_values().plot(kind='bar', figsize=(10,2)) print(type(inst_df)) ``` In the above visualization I have ranked the cell lines based in increasing number of measurements. We can see that there is a pattern in the missing data. The 45 cell lines appear to be aranged into nine groups of 5 cell lines each. These groups correpond to the 9 'plexes', or 'batches', in which the cell lines were measured. Each plex measured one control, Normal Pool, and five cancer cell lines (note that some cell lines have been measured in more than one plex and these have their plex number appended to their name). ### Cell Line Phosphorylation Distributions Since each cell line has a large number of measured phosphorylations (at least 1,500) we can reasonably expect that the distributions of phosphorylation levels in the cell lines will be similar. This is based on the assumption that biological variation is not systematic and should not result in consistently higher or lower measurements in the cell lines. Below we plot the mean values (ratios) of all measured phosphorylations in each cell line and order the cell lines by their average phosphorylation levels in ascending order. ``` def plot_cl_boxplot_with_missing_data(inst_df): ''' Make a box plot of the cell lines where the cell lines are ranked based on their average PTM levels ''' # get the order of the cell lines based on their mean sorter = inst_df.mean().sort_values().index.tolist() # reorder based on ascending mean values sort_df = inst_df[sorter] # box plot of PTM values ordered based on increasing mean sort_df.plot(kind='box', figsize=(10,3), rot=90, ylim=(-8,8)) plot_cl_boxplot_with_missing_data(inst_df) ``` We can see that there is a significant difference in the mean phosphorylation level across the cell lines. These large differenecs in the cell line distributions lead us to believe that there is a systematic error in the measurements that needs to be corrected. However, each cell line has a different subset of phosphorylations measured so to more fairly compare the cell lines we should only compare commonly measured phosphorylations. Below we plot the mean values of phosphorylations that were measured in all cell lines. ``` def plot_cl_boxplot_no_missing_data(inst_df): # get the order of the cell lines based on their mean sorter = inst_df.mean().sort_values().index.tolist() # reorder based on ascending mean values sort_df = inst_df[sorter] # transpose to get PTMs as columns tmp_df = sort_df.transpose() # keep only PTMs that are measured in all cell lines ptm_num_meas = tmp_df.count() ptm_all_meas = ptm_num_meas[ptm_num_meas == 45] ptm_all_meas = ptm_all_meas.index.tolist() print('There are ' + str(len(ptm_all_meas)) + ' PTMs measured in all cell lines') # only keep ptms that are measured in all cell lines # I will call this full_df as in no missing measurements full_df = tmp_df[ptm_all_meas] # transpose back to PTMs as rows full_df = full_df.transpose() full_df.plot(kind='box', figsize=(10,3), rot=90, ylim=(-8,8)) num_ptm_all_meas = len(ptm_all_meas) plot_cl_boxplot_no_missing_data(inst_df) ``` From the above box plot we can see that there is a significant difference in the distributions of the cell lines even when we only consider phosphorylations that were measured in all cell lines (note that the cell lines are in the same order as the previous box plot). 
This indicates that this systematic differnce in average phosphorylation values is not caused by missing values. Since we do not expect biological variation to cause this type of systematic difference between cell lines we can conclude that the large differences between cell lines are likely the result of systematic experimental error that should be corrected. Normalizing the data will be discussed [here](https://github.com/MaayanLab/CST_Lung_Cancer_Viz) # Acetylation Data I will perform the same overview on the acetylation data. There are 1,192 unique acetylations measured in the 45 cell lines. ``` filename = '../lung_cellline_3_1_16/lung_cellline_Ack/' + \ 'lung_cellline_TMT_Ack_combined_ratios.tsv' inst_df = load_data(filename) ``` ### Missing Acetylation Data ``` inst_df.count().sort_values().plot(kind='bar', figsize=(10,2)) ``` ### Cell Line Acetylation Distributions ``` plot_cl_boxplot_with_missing_data(inst_df) ``` Distribution of Acetylation data that was measured in all cell lines ``` plot_cl_boxplot_no_missing_data(inst_df) ``` # Methylation Data The methylation data has been broken up into Arginine and Lysine methylation. ## Arginine Methylation There are 1,248 Arginine methylations measured in all 42 cell lines ``` filename = '../lung_cellline_3_1_16/lung_cellline_Rme1/' + \ 'lung_cellline_TMT_Rme1_combined_ratios.tsv' inst_df = load_data(filename) ``` ### Missing Arginine Methylation Data ``` inst_df.count().sort_values().plot(kind='bar', figsize=(10,2)) ``` ### Cell Line Arginine Methylation Distributions ``` plot_cl_boxplot_with_missing_data(inst_df) ``` Argining Methylation that was measured in all cell lines ``` plot_cl_boxplot_no_missing_data(inst_df) ``` ## Lysine Methylation Data There are 230 lysine methylations measured in all cell line ``` filename = '../lung_cellline_3_1_16/lung_cellline_Kme1/' + \ 'lung_cellline_TMT_Kme1_combined_ratios.tsv' inst_df = load_data(filename) ``` ### Missing Lysine Methylation Data Some cell lines have as few as 40 lysine methylations measured. ``` inst_df.count().sort_values().plot(kind='bar', figsize=(10,2)) ``` ### Cell Line Lysine Metylation Distributions ``` plot_cl_boxplot_with_missing_data(inst_df) ``` Lysine methylation that was measured in all cell lines ``` plot_cl_boxplot_no_missing_data(inst_df) ``` There were only 26 lysine methylations that were measured in all cell lines. We still see the bias in the average values across the cell lines. # Conclusions We see that the PTM measurements (phosphorylation, acetylation, and methylation) all show large differences in average behavior across the cell lines. Furthermore, the cell lines with the highest and lowest ratios are frequently the same: DMS153 is hte cell line with the lowest ratios and H661 is the cell line with the highest ratios in all cases. ' In other words, if we were to ask which cell line has the highest or lowest level of a particular PTM site we would almost always get the same cell line no matter which site we were interested in. Since this type of uniform and systematic difference between cell lines is not what we expect biologically we can conclude that the ratio data should be normalized in some way. The normalization procedure and its affects on cell line clustering are discussed in the notebook [CST_PTM_Normalization_Overview](https://github.com/MaayanLab/CST_Lung_Cancer_Viz/blob/master/CST_PTM_Normalization_Overview.ipynb) notebook.
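To make the idea of removing this systematic offset concrete, here is a minimal sketch; it is an illustration only and not the normalization procedure used in the CST_PTM_Normalization_Overview notebook. Subtracting each cell line's median ratio, computed over whatever PTMs it has measured, removes the uniform shift seen in the box plots above.

```
def median_center(inst_df):
    # per-cell-line medians; NaNs (missing PTMs) are ignored by pandas
    col_medians = inst_df.median(axis=0, skipna=True)
    # subtracting the column medians centers every cell line's distribution near zero
    return inst_df - col_medians

# normalized_df = median_center(inst_df)
# plot_cl_boxplot_with_missing_data(normalized_df)
```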
github_jupyter
# Sublime Text ## Getting set up ### Laptop install Sublime Text (Done once per laptop) 1. Step one is to download and install [Sublime Text](https://www.sublimetext.com/3). Sidenote: You don't need to purchase a license, you can use it forever with all features in evaluate mode. If you purchase a license it follows you and you can install it on all your future laptops. 2. **Install Package Manager**: open Sublime Text, then open command palette (we will use this several times) - CMD + SHIFT + P (Mac) - CTRL + SHIFT + P (Windows) start typing "install" (Sidenote: as you type it will auto-filter, you can always select with your mouse the one that you want, or if the one you want is the top highlighted match, just hit enter.) ## SQLBeautifier ```../examples/sqlbeautifier.sql``` ## install sublimelinter ### proselint (markdown) ### shellcheck From: https://github.com/koalaman/shellcheck > The goals of ShellCheck are > - To point out and clarify typical beginner's syntax issues that cause a shell to give cryptic error messages. > - To point out and clarify typical intermediate level semantic problems that cause a shell to behave strangely and counter-intuitively. > - To point out subtle caveats, corner cases and pitfalls that may cause an advanced user's otherwise working script to fail under future circumstances. > See the [gallery of bad code](https://github.com/koalaman/shellcheck/blob/master/README.md#user-content-gallery-of-bad-code) for examples of what ShellCheck can help you identify! ## anaconda (not what you think!) Automatically formats your code to be pep8 (or whatever variant you prefer). Should SVDS have an official style-guide for python? ## Others - install BracketHighlighter - install SidebarEnhancements - install text pastry - C-option N example - install wordcount - sublime-build - tools > build - install LaTeXTools (academic papers) ## rsub (subl) Once you set up everything as below this is how you'll be able to edit files living on a server from the comfort of your laptop. 1. `ssh` into the Mothership by setting up the port forwarding (keeping this open) 2. Sublime Text open on your laptop 3. `subl whatever.py` and enjoy editing your text file on your laptop's Sublime Text (remember to hit save frequently!) ### Setting up remote Sublime Text editing These instructions tell you how to set up your laptop and a server (mothership) so that you can edit files directly on the server by using Sublime Text on your laptop. You will have to make changes at different places and these instructions vary by what kind of laptop you have (windows/macOS). Also, for complicated reasons, each laptop that connects to the mothership needs to have its own unique ports assigned to it. This applies to you if you have 2 laptops. So we'll start out assigning the following ports to people. For the rest of the instructions, you should replace {YOUR_PORT} with the following numbers (and choose the one assigned to you): 52687 # Free to assign 52688 # Free to assign 52689 # Free to assign 52690 # Free to assign 52691 # Free to assign 52692 # Free to assign 52693 # Free to assign 52694 # Free to assign 52695 # Free to assign 52696 # Free to assign 52698 # Default port Again, we just arbitrarily assigned these (see the advanced notes section if you need to change this). And where you see {MOTHERSHIP_IP_ADDRESS} replace with the correct edgenode IP address: at the time of writing this, the SVDS Node's IP was: 10.178.134.62 And where you see {USER} replace with your username `jbwhit` for example. 
### Installing `rsub` 1. **Install `rsub`**: open command palette; type `install` (select option "Package Control: Install Package"); type `rsub` and select it. If you don't see it listed it's likely already installed. You can check by opening preferences and seeing if you have an rsub option. 2. Create a file on your laptop called `rsub.sublime-settings` in folder (find by clicking in Sublime Text): `Preferences>Browse Packages>User>` The contents of the file -- remember to replace {YOUR_PORT} with your port: ``` /* rsub default settings */ { /* rsub listen port IMPORTANT: Use a different port for each machine. */ "port": {YOUR_PORT}, /* rsub listen host WARNING: it's NOT recommended to change this option, use SSH tunneling instead. */ "host": "localhost" } ``` ### Laptop ssh port forwarding [Windows] We recommend installing [Git Bash](https://git-scm.com/download/win) -- install and accept the default options. Create a shortcut script to connect to the edgenode with the Sublime connection. 1. Start GitBash 2. Create a file called `sublime_port_forward` (or whatever you want it to be) a. Navigate to your home directory on your Windows machine and create a new file (with Sublime Text if you want)! 3. Paste the following one line as the entire content of that file (replacing as required): ssh -R {YOUR_PORT}:localhost:{YOUR_PORT} {USER}@{MOTHERSHIP_IP_ADDRESS} Example: `ssh -R 52697:localhost:52697 [email protected]` 4. Save the file ### Setting up ssh port forwarding [MacOS] 1. Edit `~/.ssh/config` and update with relevant IP address {MOTHERSHIP_IP_ADDRESS} -- replace "{MOTHERSHIP_IP_ADDRESS}" with a number like {MOTHERSHIP_IP_ADDRESS} ```bash Host rsub-svdsnode HostName {MOTHERSHIP_IP_ADDRESS} RemoteForward {YOUR_PORT} 127.0.0.1:{YOUR_PORT} ``` Setting up this config lets you type `ssh rsub-svdsnode` and you will SSH into the mothership. You can shorten this name to simply `rsub` or anything else in the Host section of the config. If you connect to multiple motherships (or edgenodes) simply create new rule by copy/pasting the three lines and filling in the relevant details. ### Set up Mothership (Done once) These steps set up your account on the mothership. 1. Edit (or create) your `~/.bashrc`. Open with `vim ~/.bashrc` and add the following and **uncomment your port**: ```bash export RMATE_HOST=localhost # export RMATE_PORT=52694 # # export RMATE_PORT=52695 # # export RMATE_PORT=52696 # ``` ### Running (what you do on a daily basis) #### Windows Since you've set up the script, you will be able to connect to the edgenode with the Sublime connection by simply running the following command after opening GitBash (remember the `.`): ```bash . sublime_port_forward ``` And you (after entering your password) are logged into the Mothership. You will use prompt to open text files. #### MacOS Set up the port forwarding (you have to keep this open). You can do it the hard way: ```bash ssh -R {YOUR_PORT}:localhost:{YOUR_PORT} {USER}@{MOTHERSHIP_IP_ADDRESS} ``` or the easier way (if you set up your ssh config file as above): ```bash ssh rsub-svdsnode ``` Have Sublime Text running on your laptop -- this is where the file will appear when you run the `rsub` command on the Mothership. ### On the Mothership Open an existing file (for example framework.cfg) that you'd like to edit in Sublime Text (or create a new one by naming it): ```bash subl framework.cfg ``` And enjoy editing your text file on Sublime Text! It will sync the contents of the file when you save. 
### FAQ and initial installation notes You keep calling it `rsub` or `subl` but I keep seeing `rmate` everywhere -- what gives? The `rsub` command is using the utility originally created for TextMate, which was called using `rmate`. Since this is an update and uses Sublime Text, it's updated to `rsub`. #### Ports You shouldn't have to worry about this unless you are an admin or something has gone wrong. If you need to choose different ports or assign them, check that nothing is using them on the mothership that you want to use by running something like: ```bash sudo netstat -plant | grep {YOUR_PORT} ``` and verifying that nothing's returned. #### Installing rsub on mothership Install rsub (this requires root -- can install locally if not on the edgenode) ``` sudo wget -O /usr/local/bin/subl https://raw.github.com/aurora/rmate/master/rmate sudo chmod +x /usr/local/bin/subl ```
github_jupyter
<a href="https://colab.research.google.com/github/leehanchung/cs224w/blob/main/notebooks/XCS224W_Colab3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # **CS224W - Colab 3** In Colab 2 we constructed GNN models by using PyTorch Geometric's built in GCN layer, `GCNConv`. In this Colab we will go a step deeper and implement the **GraphSAGE** ([Hamilton et al. (2017)](https://arxiv.org/abs/1706.02216)) and **GAT** ([Veličković et al. (2018)](https://arxiv.org/abs/1710.10903)) layers directly. Then we will run and test our models on the CORA dataset, a standard citation network benchmark dataset. Next, we will use [DeepSNAP](https://snap.stanford.edu/deepsnap/), a Python library assisting efficient deep learning on graphs, to split the graphs in different settings and apply dataset transformations. Lastly, using DeepSNAP's transductive link prediction dataset spliting functionality, we will construct a simple GNN model for the task of edge property predition (link prediction). **Note**: Make sure to **sequentially run all the cells in each section** so that the intermediate variables / packages will carry over to the next cell Have fun and good luck on Colab 3 :) # Device We recommend using a GPU for this Colab. Please click `Runtime` and then `Change runtime type`. Then set the `hardware accelerator` to **GPU**. ## Installation ``` # Install torch geometric import os if 'IS_GRADESCOPE_ENV' not in os.environ: !pip uninstall torch-scatter --y !pip uninstall torch-sparse --y !pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.9.0+cu111.html !pip install torch-sparse -f https://pytorch-geometric.com/whl/torch-1.9.0+cu111.html !pip install torch-geometric !pip install -q git+https://github.com/snap-stanford/deepsnap.git import torch_geometric torch_geometric.__version__ ``` # 1) GNN Layers ## Implementing Layer Modules In Colab 2, we implemented a GCN model for node and graph classification tasks. However, for that notebook we took advantage of PyG's built in GCN module. For Colab 3, we provide a build upon a general Graph Neural Network Stack, into which we will be able to plugin our own module implementations: GraphSAGE and GAT. We will then use our layer implemenations to complete node classification on the CORA dataset, a standard citation network benchmark. In this dataset, nodes correspond to documents and edges correspond to undirected citations. Each node or document in the graph is assigned a class label and features based on the documents binarized bag-of-words representation. Specifically, the Cora graph has 2708 nodes, 5429 edges, 7 prediction classes, and 1433 features per node. ## GNN Stack Module Below is the implementation of a general GNN stack, where we can plugin any GNN layer, such as **GraphSage**, **GAT**, etc. This module is provided for you. Your implementations of the **GraphSage** and **GAT** layers will function as components in the GNNStack Module. 
``` import torch import torch_scatter import torch.nn as nn import torch.nn.functional as F import torch_geometric.nn as pyg_nn import torch_geometric.utils as pyg_utils from torch import Tensor from typing import Union, Tuple, Optional from torch_geometric.typing import (OptPairTensor, Adj, Size, NoneType, OptTensor) from torch.nn import Parameter, Linear from torch_sparse import SparseTensor, set_diag from torch_geometric.nn.conv import MessagePassing from torch_geometric.utils import remove_self_loops, add_self_loops, softmax class GNNStack(torch.nn.Module): def __init__(self, input_dim, hidden_dim, output_dim, args, emb=False): super(GNNStack, self).__init__() conv_model = self.build_conv_model(args.model_type) self.convs = nn.ModuleList() self.convs.append(conv_model(input_dim, hidden_dim)) assert (args.num_layers >= 1), 'Number of layers is not >=1' for l in range(args.num_layers-1): self.convs.append(conv_model(args.heads * hidden_dim, hidden_dim)) # post-message-passing self.post_mp = nn.Sequential( nn.Linear(args.heads * hidden_dim, hidden_dim), nn.Dropout(args.dropout), nn.Linear(hidden_dim, output_dim)) self.dropout = args.dropout self.num_layers = args.num_layers self.emb = emb def build_conv_model(self, model_type): if model_type == 'GraphSage': return GraphSage elif model_type == 'GAT': # When applying GAT with num heads > 1, you need to modify the # input and output dimension of the conv layers (self.convs), # to ensure that the input dim of the next layer is num heads # multiplied by the output dim of the previous layer. # HINT: In case you want to play with multiheads, you need to change the for-loop that builds up self.convs to be # self.convs.append(conv_model(hidden_dim * num_heads, hidden_dim)), # and also the first nn.Linear(hidden_dim * num_heads, hidden_dim) in post-message-passing. return GAT def forward(self, data): x, edge_index, batch = data.x, data.edge_index, data.batch for i in range(self.num_layers): x = self.convs[i](x, edge_index) x = F.relu(x) x = F.dropout(x, p=self.dropout,training=self.training) x = self.post_mp(x) if self.emb == True: return x return F.log_softmax(x, dim=1) def loss(self, pred, label): return F.nll_loss(pred, label) ``` ## Creating Our Own Message Passing Layer Now let's start implementing our own message passing layers! Working through this part will help us become acutely familiar with the behind the scenes work of implementing Pytorch Message Passing Layers, allowing us to build our own GNN models. To do so, we will work with and implement 3 critcal functions needed to define a PyG Message Passing Layer: `forward`, `message`, and `aggregate`. Before diving head first into the coding details, let us quickly review the key components of the message passing process. To do so, we will focus on a single round of messsage passing with respect to a single central node $x$. Before message passing, $x$ is associated with a feature vector $x^{l-1}$, and the goal of message passing is to update this feature vector as $x^l$. To do so, we implement the following steps: 1) each neighboring node $v$ passes its current message $v^{l-1}$ across the edge $(x, v)$ - 2) for the node $x$, we aggregate all of the messages of the neighboring nodes (for example through a sum or mean) - and 3) we transform the aggregated information by for example applying linear and non-linear transformations. 
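As a toy numeric walk-through of these three steps for a single central node (this snippet is not part of the Colab and uses made-up feature vectors), consider a node with two neighbors:

```
import torch
import torch.nn as nn
import torch.nn.functional as F

# Step 1: each neighbor passes its current feature vector as its message.
neighbor_msgs = torch.tensor([[1.0, 2.0],
                              [3.0, 0.0]])
# Step 2: aggregate the messages, here with a simple mean.
aggregated = neighbor_msgs.mean(dim=0)        # tensor([2., 1.])
# Step 3: transform the aggregated information (linear map + non-linearity).
lin = nn.Linear(2, 2)
x_new = F.relu(lin(aggregated))
print(x_new.shape)                            # torch.Size([2])
```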
Altogether, the message passing process is applied such that every node $u$ in our graph updates its embedding by acting as the central node $x$ in step 1-3 described above. Now, we extending this process to that of a single message passing layer, the job of a message passing layer is to update the current feature representation or embedding of each node in a graph by propagating and transforming information within the graph. Overall, the general paradigm of a message passing layers is: 1) pre-processing -> 2) **message passing** / propagation -> 3) post-processing. The `forward` fuction that we will implement for our message passing layer captures this execution logic. Namely, the `forward` function handles the pre and post-processing of node features / embeddings, as well as initiates message passing by calling the `propagate` function. The `propagate` function encapsulates the message passing process! It does so by calling three important functions: 1) `message`, 2) `aggregate`, and 3) `update`. Our implementation will vary slightly from this, as we will not explicitly implement `update`, but instead place the logic for updating node embeddings after message passing and within the `forward` function. To be more specific, after information is propagated (message passing), we can further transform the node embeddings outputed by `propagate`. Therefore, the output of `forward` is exactly the node embeddings after one GNN layer. Lastly, before starting to implement our own layer, let us dig a bit deeper into each of the functions described above: 1. ``` def propagate(edge_index, x=(x_i, x_j), extra=(extra_i, extra_j), size=size): ``` Calling `propagate` initiates the message passing process. Looking at the function parameters, we highlight a couple of key parameters. - `edge_index` is passed to the forward function and captures the edge structure of the graph. - `x=(x_i, x_j)` represents the node features that will be used in message passing. In order to explain why we pass the tuple `(x_i, x_j)`, we first look at how our edges are represented. For every edge $(i, j) \in \mathcal{E}$, we can differentiate $i$ as the source or central node ($x_{central}$) and j as the neighboring node ($x_{neighbor}$). Taking the example of message passing above, for a central node $u$ we will aggregate and transform all of the messages associated with the nodes $v$ s.t. $(u, v) \in \mathcal{E}$ (i.e. $v \in \mathcal{N}_{u}$). Thus we see, the subscripts `_i` and `_j` allow us to specifcally differenciate features associated with central nodes (i.e. nodes recieving message information) and neighboring nodes (i.e. nodes passing messages). This is definitely a somewhat confusing concept; however, one key thing to remember / wrap your head around is that depending on the perspective, a node $x$ acts as a central node or a neighboring node. In fact, in undirected graphs we store both edge directions (i.e. $(i, j)$ and $(j, i)$). From the central node perspective, `x_i`, x is collecting neighboring information to update its embedding. From a neighboring node perspective, `x_j`, x is passing its message information along the edge connecting it to a different central node. - `extra=(extra_i, extra_j)` represents additional information that we can associate with each node beyond its current feature embedding. In fact, we can include as many additional parameters of the form `param=(param_i, param_j)` as we would like. 
Again, we highlight that indexing with `_i` and `_j` allows us to differentiate central and neighboring nodes. The output of the `propagate` function is a matrix of node embeddings after the message passing process and has shape $[N, d]$. 2. ``` def message(x_j, ...): ``` The `message` function is called by propagate and constructs the messages from neighboring nodes $j$ to central nodes $i$ for each edge $(i, j)$ in *edge_index*. This function can take any argument that was initially passed to `propagate`. Furthermore, we can again differentiate central nodes and neighboring nodes by appending `_i` or `_j` to the variable name, .e.g. `x_i` and `x_j`. Looking more specifically at the variables, we have: - `x_j` represents a matrix of feature embeddings for all neighboring nodes passing their messages along their respective edge (i.e. all nodes $j$ for edges $(i, j) \in \mathcal{E}$). Thus, its shape is $[|\mathcal{E}|, d]$! - In implementing GAT we will see how to access additional variables passed to propagate Critically, we see that the output of the `message` function is a matrix of neighboring node embeddings ready to be aggregated, having shape $[|\mathcal{E}|, d]$. 3. ``` def aggregate(self, inputs, index, dim_size = None): ``` Lastly, the `aggregate` function is used to aggregate the messages from neighboring nodes. Looking at the parameters we highlight: - `inputs` represents a matrix of the messages passed from neighboring nodes (i.e. the output of the `message` function). - `index` has the same shape as `inputs` and tells us the central node that corresponding to each of the rows / messages $j$ in the `inputs` matrix. Thus, `index` tells us which rows / messages to aggregate for each central node. The output of `aggregate` is of shape $[N, d]$. For additional resources refer to the PyG documentation for implementing custom message passing layers: https://pytorch-geometric.readthedocs.io/en/latest/notes/create_gnn.html ## GraphSage Implementation For our first GNN layer, we will implement the well known GraphSage ([Hamilton et al. (2017)](https://arxiv.org/abs/1706.02216)) layer! For a given *central* node $v$ with current embedding $h_v^{l-1}$, the message passing update rule to tranform $h_v^{l-1} \rightarrow h_v^l$ is as follows: \begin{equation} h_v^{(l)} = W_l\cdot h_v^{(l-1)} + W_r \cdot AGG(\{h_u^{(l-1)}, \forall u \in N(v) \}) \end{equation} where $W_1$ and $W_2$ are learanble weight matrices and the nodes $u$ are *neighboring* nodes. Additionally, we use mean aggregation for simplicity: \begin{equation} AGG(\{h_u^{(l-1)}, \forall u \in N(v) \}) = \frac{1}{|N(v)|} \sum_{u\in N(v)} h_u^{(l-1)} \end{equation} One thing to note is that we're adding a **skip connection** to our GraphSage implementation through the term $W_l\cdot h_v^{(l-1)}$. Before implementing this update rule, we encourage you to think about how different parts of the formulas above correspond with the functions outlined earlier: 1) `forward`, 2) `message`, and 3) `aggregate`. As a hint, we are given what the aggregation function is (i.e. mean aggregation)! Now the question remains, what are the messages passed by each neighbor nodes and when do we call the `propagate` function? Note: in this case the message function or messages are actually quite simple. Additionally, remember that the `propagate` function encapsulates the operations of / the outputs of the combined `message` and `aggregate` functions. Lastly, $\ell$-2 normalization of the node embeddings is applied after each iteration. 
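Before looking at the implementation, here is a small stand-alone illustration (not part of the Colab) of how `torch_scatter.scatter` performs the mean aggregation described above: `index` plays the role of the central-node indices, so messages that share an index are averaged together.

```
import torch
import torch_scatter

# Four messages, one per edge; `index` holds the central node of each edge.
messages = torch.tensor([[1., 0.],
                         [3., 0.],
                         [0., 2.],
                         [0., 4.]])
index = torch.tensor([0, 0, 1, 1])
out = torch_scatter.scatter(messages, index, dim=0, reduce='mean')
print(out)   # tensor([[2., 0.], [0., 3.]]) -- one row per central node
```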
<font color='red'>For the following questions, DON'T refer to any existing implementations online.</font> ``` class GraphSage(MessagePassing): def __init__(self, in_channels, out_channels, normalize = True, bias = False, **kwargs): super(GraphSage, self).__init__(**kwargs) self.in_channels = in_channels self.out_channels = out_channels self.normalize = normalize self.lin_l = None self.lin_r = None ############################################################################ # TODO: Your code here! # Define the layers needed for the message and update functions below. # self.lin_l is the linear transformation that you apply to embedding # for central node. # self.lin_r is the linear transformation that you apply to aggregated # message from neighbors. # Our implementation is ~2 lines, but don't worry if you deviate from this. self.lin_l = nn.Linear(self.in_channels, self.out_channels) self.lin_r = nn.Linear(self.in_channels, self.out_channels) ############################################################################ self.reset_parameters() def reset_parameters(self): self.lin_l.reset_parameters() self.lin_r.reset_parameters() def forward(self, x, edge_index, size = None): """""" out = None ############################################################################ # TODO: Your code here! # Implement message passing, as well as any post-processing (our update rule). # 1. Call propagate function to conduct the message passing. # 1.1 See the description of propagate above or the following link for more information: # https://pytorch-geometric.readthedocs.io/en/latest/notes/create_gnn.html # 1.2 We will only use the representation for neighbor nodes (x_j), so by default # we pass the same representation for central and neighbor nodes as x=(x, x). # 2. Update our node embedding with skip connection. # 3. If normalize is set, do L-2 normalization (defined in # torch.nn.functional) # # Our implementation is ~5 lines, but don't worry if you deviate from this. x_propagate = self.propagate(edge_index, x=(x, x), size=size) x = self.lin_l(x) + x_propagate if self.normalize: x = F.normalize(x) out = x ############################################################################ return out def message(self, x_j): out = None ############################################################################ # TODO: Your code here! # Implement your message function here. # Hint: Look at the formulation of the mean aggregation function, focusing on # what message each neighboring node passes. # # Our implementation is ~1 lines, but don't worry if you deviate from this. out = self.lin_r(x_j) ############################################################################ return out def aggregate(self, inputs, index, dim_size = None): out = None # The axis along which to index number of nodes. node_dim = self.node_dim ############################################################################ # TODO: Your code here! # Implement your aggregate function here. # See here as how to use torch_scatter.scatter: # https://pytorch-scatter.readthedocs.io/en/latest/functions/scatter.html#torch_scatter.scatter # # Our implementation is ~1 lines, but don't worry if you deviate from this. out = torch_scatter.scatter(inputs, index, dim=node_dim, reduce='mean') ############################################################################ return out ``` ## GAT Implementation Attention mechanisms have become the state-of-the-art in many sequence-based tasks such as machine translation and learning sentence representations. 
One of the major benefits of attention-based mechanisms is their ability to focus on the most relevant parts of the input to make decisions. In this problem, we will see how attention mechanisms can be used to perform node classification over graph-structured data through the usage of Graph Attention Networks (GATs) ([Veličković et al. (2018)](https://arxiv.org/abs/1710.10903)). The building block of the Graph Attention Network is the graph attention layer, which is a variant of the aggregation function. Let $N$ be the number of nodes and $F$ be the dimension of the feature vector for each node. The input to each graph attentional layer is a set of node features: $\mathbf{h} = \{\overrightarrow{h_1}, \overrightarrow{h_2}, \dots, \overrightarrow{h_N}$\}, $\overrightarrow{h_i} \in R^F$. The output of each graph attentional layer is a new set of node features, which may have a new dimension $F'$: $\mathbf{h'} = \{\overrightarrow{h_1'}, \overrightarrow{h_2'}, \dots, \overrightarrow{h_N'}\}$, with $\overrightarrow{h_i'} \in \mathbb{R}^{F'}$. We will now describe how this transformation is performed for each graph attention layer. First, a shared linear transformation parametrized by the weight matrix $\mathbf{W} \in \mathbb{R}^{F' \times F}$ is applied to every node. Next, we perform self-attention on the nodes. We use a shared attention function $a$: \begin{equation} a : \mathbb{R}^{F'} \times \mathbb{R}^{F'} \rightarrow \mathbb{R}. \end{equation} that computes the attention coefficients capturing the importance of node $j$'s features to node $i$: \begin{equation} e_{ij} = a(\mathbf{W_l}\overrightarrow{h_i}, \mathbf{W_r} \overrightarrow{h_j}) \end{equation} The most general formulation of self-attention allows every node to attend to all other nodes which drops all structural information. However, to utilize graph structure in the attention mechanisms, we use **masked attention**. In masked attention, we only compute attention coefficients $e_{ij}$ for nodes $j \in \mathcal{N}_i$ where $\mathcal{N}_i$ is some neighborhood of node $i$ in the graph. To easily compare coefficients across different nodes, we normalize the coefficients across $j$ using a softmax function: \begin{equation} \alpha_{ij} = \text{softmax}_j(e_{ij}) = \frac{\exp(e_{ij})}{\sum_{k \in \mathcal{N}_i} \exp(e_{ik})} \end{equation} For this problem, our attention mechanism $a$ will be a single-layer feedforward neural network parametrized by a weight vectors $\overrightarrow{a} \in \mathbb{R}^{F'}$ and $\overrightarrow{a} \in \mathbb{R}^{F'}$, followed by a LeakyReLU nonlinearity (with negative input slope 0.2). Let $\cdot^T$ represent transposition and $||$ represent concatenation. The coefficients computed by our attention mechanism may be expressed as: \begin{equation} \alpha_{ij} = \frac{\exp\Big(\text{LeakyReLU}\Big(\overrightarrow{a_l}^T \mathbf{W_l} \overrightarrow{h_i} + \overrightarrow{a_r}^T\mathbf{W_r}\overrightarrow{h_j}\Big)\Big)}{\sum_{k\in \mathcal{N}_i} \exp\Big(\text{LeakyReLU}\Big(\overrightarrow{a_l}^T \mathbf{W_l} \overrightarrow{h_i} + \overrightarrow{a_r}^T\mathbf{W_r}\overrightarrow{h_k}\Big)\Big)} \end{equation} For the following questions, we denote `alpha_l` = $\alpha_l = [...,\overrightarrow{a_l}^T \mathbf{W_l} \overrightarrow{h_i},...] \in \mathcal{R}^n$ and `alpha_r` = $\alpha_r = [..., \overrightarrow{a_r}^T \mathbf{W_r} \overrightarrow{h_j}, ...] \in \mathcal{R}^n$. 
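As a small stand-alone illustration of this masked, per-neighborhood normalization (not part of the Colab), `torch_geometric.utils.softmax` normalizes raw scores only within the group of edges that share the same central-node index:

```
import torch
from torch_geometric.utils import softmax

# Raw attention scores e_ij for four edges; `index` is the central node i of each edge.
e = torch.tensor([1.0, 2.0, 0.5, 0.5])
index = torch.tensor([0, 0, 1, 1])
alpha = softmax(e, index)
print(alpha)   # the first two entries sum to 1, and the last two entries sum to 1
```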
At every layer of GAT, after the attention coefficients are computed for that layer, the aggregation function can be computed by a weighted sum of neighborhood messages, where weights are specified by $\alpha_{ij}$. Now, we use the normalized attention coefficients to compute a linear combination of the features corresponding to them. These aggregated features will serve as the final output features for every node. \begin{equation} h_i' = \sum_{j \in \mathcal{N}_i} \alpha_{ij} \mathbf{W_r} \overrightarrow{h_j}. \end{equation} At this point, we have covered a lot of information! Before reading further about multi-head attention, we encourage you to go again through the excersize of thinking about what components of the attention mechanism correspond with the different funcitons: 1) `forward`, 2) `message`, and 3 `aggregate`. - Hint 1: Our aggregation is very similar to that of GraphSage except now we are using sum aggregation - Hint 2: The terms we aggregate over again represent the individual message that each neighbor node j sends. Thus, we see that $\alpha_{ij}$ is part of the message each node sends and is thus computed during the message step. This makes sense since an attention weight is associated with each edge in the graph. - Hint 3: Look at the terms in the definition of $\alpha_{ij}$. What values do we want to pre-process and pass as parameters to the `propagate` function. The parameters of `message(..., x_j, alpha_j, alpha_i, ...)` should give a good hint. ### Multi-Head Attention To stabilize the learning process of self-attention, we use multi-head attention. To do this we use $K$ independent attention mechanisms, or ``heads'' compute output features as in the above equations. Then, we concatenate these output feature representations: \begin{equation} \overrightarrow{h_i}' = ||_{k=1}^K \Big(\sum_{j \in \mathcal{N}_i} \alpha_{ij}^{(k)} \mathbf{W_r}^{(k)} \overrightarrow{h_j}\Big) \end{equation} where $||$ is concentation, $\alpha_{ij}^{(k)}$ are the normalized attention coefficients computed by the $k$-th attention mechanism $(a^k)$, and $\mathbf{W}^{(k)}$ is the corresponding input linear transformation's weight matrix. Note that for this setting, $\mathbf{h'} \in \mathbb{R}^{KF'}$. ``` class GAT(MessagePassing): def __init__(self, in_channels, out_channels, heads = 2, negative_slope = 0.2, dropout = 0., **kwargs): super(GAT, self).__init__(node_dim=0, **kwargs) self.in_channels = in_channels self.out_channels = out_channels self.heads = heads self.negative_slope = negative_slope self.dropout = dropout self.lin_l = None self.lin_r = None self.att_l = None self.att_r = None ############################################################################ # TODO: Your code here! # Define the layers needed for the message functions below. # self.lin_l is the linear transformation that you apply to embeddings # BEFORE message passing. # # Pay attention to dimensions of the linear layers, since we're using # multi-head attention. # Our implementation is ~1 lines, but don't worry if you deviate from this. self.lin_l = nn.Linear(self.in_channels, self.heads * self.out_channels) ############################################################################ self.lin_r = self.lin_l ############################################################################ # TODO: Your code here! # Define the attention parameters \overrightarrow{a_l/r}^T in the above intro. # You have to deal with multi-head scenarios. 
# Use nn.Parameter instead of nn.Linear # Our implementation is ~2 lines, but don't worry if you deviate from this. self.att_l = nn.Parameter(torch.randn(heads, self.out_channels)) self.att_r = nn.Parameter(torch.randn(heads, self.out_channels)) ############################################################################ self.reset_parameters() def reset_parameters(self): nn.init.xavier_uniform_(self.lin_l.weight) nn.init.xavier_uniform_(self.lin_r.weight) nn.init.xavier_uniform_(self.att_l) nn.init.xavier_uniform_(self.att_r) def forward(self, x, edge_index, size = None): H, C = self.heads, self.out_channels ############################################################################ # TODO: Your code here! # Implement message passing, as well as any pre- and post-processing (our update rule). # 1. First apply linear transformation to node embeddings, and split that # into multiple heads. We use the same representations for source and # target nodes, but apply different linear weights (W_l and W_r) # 2. Calculate alpha vectors for central nodes (alpha_l) and neighbor nodes (alpha_r). # 3. Call propagate function to conduct the message passing. # 3.1 Remember to pass alpha = (alpha_l, alpha_r) as a parameter. # 3.2 See there for more information: https://pytorch-geometric.readthedocs.io/en/latest/notes/create_gnn.html # 4. Transform the output back to the shape of N * d. # Our implementation is ~5 lines, but don't worry if you deviate from this. # x_l dims: N x H x C x_l = self.lin_l(x).view(-1, H, C) # x_r dims: N x H x C x_r = self.lin_r(x).view(-1, H, C) # alpha_l dims: # 1 x H x C * N x H x C alpha_l = self.att_l.unsqueeze(0) * x_l # alpha_r dims: # 1 x H x C * N x H x C alpha_r = self.att_r.unsqueeze(0) * x_r out = self.propagate(edge_index, x = (x_l, x_r), alpha=(alpha_l, alpha_r)) out = out.view(-1, H*C) ############################################################################ return out def message(self, x_j, alpha_j, alpha_i, index, ptr, size_i): ############################################################################ # TODO: Your code here! # Implement your message function. Putting the attention in message # instead of in update is a little tricky. # 1. Calculate the final attention weights using alpha_i and alpha_j, # and apply leaky Relu. # 2. Calculate softmax over the neighbor nodes for all the nodes. Use # torch_geometric.utils.softmax instead of the one in Pytorch. # 3. Apply dropout to attention weights (alpha). # 4. Multiply embeddings and attention weights. As a sanity check, the output # should be of shape E * H * d. # 5. ptr (LongTensor, optional): If given, computes the softmax based on # sorted inputs in CSR representation. You can simply pass it to softmax. # Our implementation is ~5 lines, but don't worry if you deviate from this. alpha_ij = F.leaky_relu(alpha_i + alpha_j, negative_slope=self.negative_slope) if ptr is None: alpha_ij = softmax(alpha_ij, index) else: alpha_ij = softmax(alpha_ij, ptr) alpha_ij = F.dropout(alpha_ij, p=self.dropout, training=self.training) out = x_j * alpha_ij ############################################################################ return out def aggregate(self, inputs, index, dim_size = None): ############################################################################ # TODO: Your code here! # Implement your aggregate function here. # See here as how to use torch_scatter.scatter: https://pytorch-scatter.readthedocs.io/en/latest/_modules/torch_scatter/scatter.html # Pay attention to "reduce" parameter is different from that in GraphSage.
# Our implementation is ~1 lines, but don't worry if you deviate from this. out = torch_scatter.scatter(inputs, index, dim=self.node_dim, reduce='sum') ############################################################################ return out ``` ## Building Optimizers This function has been implemented for you. **For grading purposes please use the default Adam optimizer**, but feel free to play with other types of optimizers on your own. ``` import torch.optim as optim def build_optimizer(args, params): weight_decay = args.weight_decay filter_fn = filter(lambda p : p.requires_grad, params) if args.opt == 'adam': optimizer = optim.Adam(filter_fn, lr=args.lr, weight_decay=weight_decay) elif args.opt == 'sgd': optimizer = optim.SGD(filter_fn, lr=args.lr, momentum=0.95, weight_decay=weight_decay) elif args.opt == 'rmsprop': optimizer = optim.RMSprop(filter_fn, lr=args.lr, weight_decay=weight_decay) elif args.opt == 'adagrad': optimizer = optim.Adagrad(filter_fn, lr=args.lr, weight_decay=weight_decay) if args.opt_scheduler == 'none': return None, optimizer elif args.opt_scheduler == 'step': scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.opt_decay_step, gamma=args.opt_decay_rate) elif args.opt_scheduler == 'cos': scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.opt_restart) return scheduler, optimizer ``` ## Training and Testing Here we provide you with the functions to train and test. **Please do not modify this part for grading purposes.** ``` import time import networkx as nx import numpy as np import torch import torch.optim as optim from tqdm import trange import pandas as pd import copy from torch_geometric.datasets import TUDataset from torch_geometric.datasets import Planetoid from torch_geometric.data import DataLoader import torch_geometric.nn as pyg_nn import matplotlib.pyplot as plt def train(dataset, args): print("Node task. test set size:", np.sum(dataset[0]['test_mask'].numpy())) print() test_loader = loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=False) # build model model = GNNStack(dataset.num_node_features, args.hidden_dim, dataset.num_classes, args) scheduler, opt = build_optimizer(args, model.parameters()) # train losses = [] test_accs = [] best_acc = 0 best_model = None for epoch in trange(args.epochs, desc="Training", unit="Epochs"): total_loss = 0 model.train() for batch in loader: opt.zero_grad() pred = model(batch) label = batch.y pred = pred[batch.train_mask] label = label[batch.train_mask] loss = model.loss(pred, label) loss.backward() opt.step() total_loss += loss.item() * batch.num_graphs total_loss /= len(loader.dataset) losses.append(total_loss) if epoch % 10 == 0: test_acc = test(test_loader, model) test_accs.append(test_acc) if test_acc > best_acc: best_acc = test_acc best_model = copy.deepcopy(model) else: test_accs.append(test_accs[-1]) return test_accs, losses, best_model, best_acc, test_loader def test(loader, test_model, is_validation=False, save_model_preds=False, model_type=None): test_model.eval() correct = 0 # Note that Cora is only one graph! 
for data in loader: with torch.no_grad(): # max(dim=1) returns values, indices tuple; only need indices pred = test_model(data).max(dim=1)[1] label = data.y mask = data.val_mask if is_validation else data.test_mask # node classification: only evaluate on nodes in test set pred = pred[mask] label = label[mask] if save_model_preds: print ("Saving Model Predictions for Model Type", model_type) data = {} data['pred'] = pred.view(-1).cpu().detach().numpy() data['label'] = label.view(-1).cpu().detach().numpy() df = pd.DataFrame(data=data) # Save locally as csv df.to_csv('CORA-Node-' + model_type + '.csv', sep=',', index=False) correct += pred.eq(label).sum().item() total = 0 for data in loader.dataset: total += torch.sum(data.val_mask if is_validation else data.test_mask).item() return correct / total class objectview(object): def __init__(self, d): self.__dict__ = d ``` ## Let's Start the Training! We will be working on the CORA dataset on node-level classification. This part is implemented for you. **For grading purposes, please do not modify the default parameters.** However, feel free to play with different configurations just for fun! **Submit your best accuracy and loss on Gradescope.** ``` if 'IS_GRADESCOPE_ENV' not in os.environ: for args in [ {'model_type': 'GraphSage', 'dataset': 'cora', 'num_layers': 2, 'heads': 1, 'batch_size': 32, 'hidden_dim': 32, 'dropout': 0.5, 'epochs': 500, 'opt': 'adam', 'opt_scheduler': 'none', 'opt_restart': 0, 'weight_decay': 5e-3, 'lr': 0.01}, ]: args = objectview(args) for model in ['GraphSage', 'GAT']: args.model_type = model # Match the dimension. if model == 'GAT': args.heads = 2 else: args.heads = 1 if args.dataset == 'cora': dataset = Planetoid(root='/tmp/cora', name='Cora') else: raise NotImplementedError("Unknown dataset") test_accs, losses, best_model, best_acc, test_loader = train(dataset, args) print("Maximum test set accuracy: {0}".format(max(test_accs))) print("Minimum loss: {0}".format(min(losses))) # Run test for our best model to save the predictions! test(test_loader, best_model, is_validation=False, save_model_preds=True, model_type=model) print() plt.title(dataset.name) plt.plot(losses, label="training loss" + " - " + args.model_type) plt.plot(test_accs, label="test accuracy" + " - " + args.model_type) plt.legend() plt.show() ``` ## Question 1.1: What is the maximum accuracy obtained on the test set for GraphSage? (10 points) Running the cell above will show the results of your best model and save your best model's predictions to a file named *CORA-Node-GraphSage.csv*. As we have seen before you can view this file by clicking on the *Folder* icon on the left side pannel. When you sumbit your assignment, you will have to download this file and attatch it to your submission. ## Question 1.2: What is the maximum accuracy obtained on test set for GAT? (10 points) Running the training cell above will also save your best GAT model predictions as *CORA-Node-GAT.csv*. When you sumbit your assignment, you will have to download this file and attatch it to your submission. # 2) DeepSNAP Basics In previous Colabs, we have seen graph class (NetworkX) and tensor (PyG) representations of graphs. The graph class `nx.Graph` provides rich analysis and manipulation functionalities, such as computing the clustering coefficient and PageRank vector for a graph. When working with PyG we were then introduced to tensor based representation of graphs (i.e. edge tensor `edge_index` and node attributes tensors `x` and `y`). 
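As a quick refresher before introducing DeepSNAP, the two views of the same toy 3-node graph look roughly like this (a sketch with made-up features and labels, not part of the assignment):

```
import networkx as nx
import torch

# Graph-class view: convenient analysis and manipulation
G = nx.Graph([(0, 1), (1, 2), (2, 0)])
print(nx.clustering(G))        # clustering coefficient per node
print(nx.pagerank(G))          # PageRank vector

# Tensor view (PyG-style): efficient message passing
edge_index = torch.tensor([[0, 1, 1, 2, 2, 0],
                           [1, 0, 2, 1, 0, 2]])   # both directions of each edge
x = torch.rand(3, 5)           # toy node features
y = torch.tensor([0, 1, 0])    # toy node labels
print(edge_index.shape, x.shape, y.shape)
```

DeepSNAP, introduced next, keeps both views side by side in a single graph object.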
In this section, we present DeepSNAP, a package that combines the benifits of both graph representations and offers a full pipeline for GNN training / validation / and testing. Namely, DeepSNAP includes a graph class representation to allow for more efficient graph manipulation and analysis in addition to a tensor based representation for efficient message passing computation. In general, [DeepSNAP](https://github.com/snap-stanford/deepsnap) is a Python library to assist efficient deep learning on graphs. DeepSNAP enables flexible graph manipulation, standard graph learning pipelines, heterogeneous graphs, and ovearll represents a simple graph learning API. In more detail: 1. DeepSNAP allows for sophisticated graph manipulations, such as feature computation, pretraining, subgraph extraction etc. during/before training. 2. DeepSNAP standardizes the pipelines for node, edge, and graph-level prediction tasks under inductive or transductive settings. Specifically, DeepSNAP removes previous non-trivial / repetative design choices left to the user, such as how to split datasets. DeepSNAP thus greatly saves repetitive often non-trivial coding efforts and enables fair model comparison. 3. Many real-world graphs are heterogeneous in nature (i.e. include different node types or edge types). However, most packages lack complete support for heterogeneous graphs, including data storage and flexible message passing. DeepSNAP provides an efficient and flexible heterogeneous graph that supports both node and edge heterogeneity. In this next section, we will focus on working with DeepSNAP for graph manipulation and dataset splitting. [DeepSNAP](https://github.com/snap-stanford/deepsnap) is a newly released project and it is still under development. If you find any bugs or have any improvement ideas, feel free to raise issues or create pull requests on the GitHub directly :) ## Setup ``` import torch import networkx as nx import matplotlib.pyplot as plt from deepsnap.graph import Graph from deepsnap.batch import Batch from deepsnap.dataset import GraphDataset from torch_geometric.datasets import Planetoid, TUDataset from torch.utils.data import DataLoader def visualize(G, color_map=None, seed=123): if color_map is None: color_map = '#c92506' plt.figure(figsize=(8, 8)) nodes = nx.draw_networkx_nodes(G, pos=nx.spring_layout(G, seed=seed), \ label=None, node_color=color_map, node_shape='o', node_size=150) edges = nx.draw_networkx_edges(G, pos=nx.spring_layout(G, seed=seed), alpha=0.5) if color_map is not None: plt.scatter([],[], c='#c92506', label='Nodes with label 0', edgecolors="black", s=140) plt.scatter([],[], c='#fcec00', label='Nodes with label 1', edgecolors="black", s=140) plt.legend(prop={'size': 13}, handletextpad=0) nodes.set_edgecolor('black') plt.show() ``` ## DeepSNAP Graph The `deepsnap.graph.Graph` class is the core class of DeepSNAP. It not only represents a graph in tensor format but also includes a graph object from a graph manipulation package. Currently DeepSNAP supports [NetworkX](https://networkx.org/) and [Snap.py](https://snap.stanford.edu/snappy/doc/index.html) as back end graph manipulation packages. In this Colab, we will focus on using NetworkX as the back end graph manipulation package. ### NetworkX to DeepSNAP To begin, let us first work through converting a simple random NetworkX graph to a DeepSNAP graph. 
``` if 'IS_GRADESCOPE_ENV' not in os.environ: num_nodes = 100 p = 0.05 seed = 100 # Generate a networkx random graph G = nx.gnp_random_graph(num_nodes, p, seed=seed) # Generate some random node features and labels node_feature = {node : torch.rand([5, ]) for node in G.nodes()} node_label = {node : torch.randint(0, 2, ()) for node in G.nodes()} # Set the random features and labels to G nx.set_node_attributes(G, node_feature, name='node_feature') nx.set_node_attributes(G, node_label, name='node_label') # Print one node example for node in G.nodes(data=True): print(node) break color_map = ['#c92506' if node[1]['node_label'].item() == 0 else '#fcec00' for node in G.nodes(data=True)] # Visualize the graph visualize(G, color_map=color_map) # Transform the networkx graph into the deepsnap graph graph = Graph(G) # Print out the general deepsnap graph information print(graph) # DeepSNAP will convert node attributes to tensors # Notice the type of tensors print("Node feature (node_feature) has shape {} and type {}".format(graph.node_feature.shape, graph.node_feature.dtype)) print("Node label (node_label) has shape {} and type {}".format(graph.node_label.shape, graph.node_label.dtype)) # DeepSNAP will also generate the edge_index tensor print("Edge index (edge_index) has shape {} and type {}".format(graph.edge_index.shape, graph.edge_index.dtype)) # Different from only storing tensors, deepsnap graph also references to the networkx graph # We will discuss why the reference will be helpful later print("The DeepSNAP graph has {} as the internal manupulation graph".format(type(graph.G))) ``` ### Tensor graph attributes Similar to the native PyG tensor based representation, DeepSNAP includes a graph tensor based representation with three levels of graph attributes. In this example, we primarily have **node level** attributes including `node_feature` and `node_label`. The other two levels of attributes are **edge** and **graph** attributes. Similar to node level attributes, these attributes are prefixed by their respective type. For example, the features become `edge_feature` or `graph_feature` and labels becomes `edge_label` or `graph_label` etc. ### Graph Object DeepSNAP additionally allows us to easily access graph information through the backend graph object and graph manipulation package. ``` if 'IS_GRADESCOPE_ENV' not in os.environ: # Number of nodes print("The random graph has {} nodes".format(graph.num_nodes)) # Number of edges print("The random graph has {} edges".format(graph.num_edges)) ``` ### PyG to DeepSNAP Lastly, DeepSNAP provides functionality to automatically transform a PyG dataset into a list of DeepSNAP graphs. Here we transform the CORA dataset into a list with one DeepSNAP graph (i.e. the singular CORA graph). ``` if 'IS_GRADESCOPE_ENV' not in os.environ: root = './tmp/cora' name = 'Cora' # The Cora dataset pyg_dataset= Planetoid(root, name) # PyG dataset to a list of deepsnap graphs graphs = GraphDataset.pyg_to_graphs(pyg_dataset) # Get the first deepsnap graph (CORA only has one graph) graph = graphs[0] print(graph) ``` ## Question 2.1: How many classes are in the CORA graph? How many features does each node have? (5 points) ``` def get_num_node_classes(graph): # TODO: Implement a function that takes a deepsnap graph object # and return the number of node classes of that graph. num_node_classes = 0 ############# Your code here ############# ## (~1 line of code) ## Note ## 1. Colab autocomplete functionality might be useful ## 2. 
DeepSNAP documentation might be useful https://snap.stanford.edu/deepsnap/modules/graph.html num_node_classes = graph.num_node_labels ########################################## return num_node_classes def get_num_node_features(graph): # TODO: Implement a function that takes a deepsnap graph object # and return the number of node features of that graph. num_node_features = 0 ############# Your code here ############# ## (~1 line of code) ## Note ## 1. Colab autocomplete functionality might be useful ## 2. DeepSNAP documentation might be useful https://snap.stanford.edu/deepsnap/modules/graph.html num_node_features = graph.num_node_features ########################################## return num_node_features if 'IS_GRADESCOPE_ENV' not in os.environ: num_node_classes = get_num_node_classes(graph) num_node_features = get_num_node_features(graph) print("{} has {} classes".format(name, num_node_classes)) print("{} has {} features".format(name, num_node_features)) ``` ## DeepSNAP Dataset Now, we will learn how to create DeepSNAP datasets. A `deepsnap.dataset.GraphDataset` contains a list of `deepsnap.graph.Graph` objects. In addition to the list of graphs, we specify what task the dataset will be used on, such as node level task (`task=node`), edge level task (`task=link_pred`) and graph level task (`task=graph`). The GraphDataset class contains many other useful parameters that can be specified during initialization. If you are interested, you can take a look at the [documentation](https://snap.stanford.edu/deepsnap/modules/dataset.html#deepsnap-graphdataset). As an example, we will first look at the COX2 dataset, which contains 467 graphs. In initializng our dataset, we convert the PyG dataset into its corresponding DeepSNAP dataset and specify the task to `graph`. ``` if 'IS_GRADESCOPE_ENV' not in os.environ: root = './tmp/cox2' name = 'COX2' # Load the dataset through PyG pyg_dataset = TUDataset(root, name) # Convert to a list of deepsnap graphs graphs = GraphDataset.pyg_to_graphs(pyg_dataset) # Convert list of deepsnap graphs to deepsnap dataset with specified task=graph dataset = GraphDataset(graphs, task='graph') print(dataset) ``` ## Question 2.2: What is the label of the graph with index 100? (5 points) ``` def get_graph_class(dataset, idx): # TODO: Implement a function that takes a deepsnap dataset object, # the index of a graph in the dataset, and returns the class/label # of the graph (in integer). label = -1 ############# Your code here ############ ## (~1 line of code) ## Notice ## 1. The graph label refers to a graph-level attribute label = dataset[idx].graph_label ######################################### return label if 'IS_GRADESCOPE_ENV' not in os.environ: graph_0 = dataset[0] print(graph_0) idx = 100 label = get_graph_class(dataset, idx) print('Graph with index {} has label {}'.format(idx, label)) ``` ## Question 2.3: How many edges are in the graph with index 200? (5 points) ``` def get_graph_num_edges(dataset, idx): # TODO: Implement a function that takes a deepsnap dataset object, # the index of a graph in dataset, and returns the number of # edges in the graph (in integer). num_edges = 0 ############# Your code here ############ ## (~1 lines of code) ## Note ## 1. 
You can use the class property directly num_edges = dataset[idx].num_edges ######################################### return num_edges if 'IS_GRADESCOPE_ENV' not in os.environ: idx = 200 num_edges = get_graph_num_edges(dataset, idx) print('Graph with index {} has {} edges'.format(idx, num_edges)) ``` # 3) DeepSNAP Advanced Now that we have learned the basics of DeepSNAP lets move on to some more advanced functionalities. In this section we will use DeepSNAP for graph feature computation and transductive/inductive dataset splitting. ## Setup ``` import torch import networkx as nx import matplotlib.pyplot as plt from deepsnap.graph import Graph from deepsnap.batch import Batch from deepsnap.dataset import GraphDataset from torch_geometric.datasets import Planetoid, TUDataset from torch.utils.data import DataLoader ``` ## Data Split in Graphs As discussed in (LECTURE REFERENCE), data splitting for graphs can be much harder than for CV or NLP. In general, data splitting is divided into two settings, **inductive** and **transductive**. ## Inductive Split In an inductive setting, we split a list of multiple graphs into disjoint training/valiation and test sets. Here is an example of using DeepSNAP to inductively split a list of graphs for a graph level task (graph classification etc.): ``` if 'IS_GRADESCOPE_ENV' not in os.environ: root = './tmp/cox2' name = 'COX2' pyg_dataset = TUDataset(root, name) graphs = GraphDataset.pyg_to_graphs(pyg_dataset) # Here we specify the task as graph-level task such as graph classification task = 'graph' dataset = GraphDataset(graphs, task=task) # Specify transductive=False (inductive) dataset_train, dataset_val, dataset_test = dataset.split(transductive=False, split_ratio=[0.8, 0.1, 0.1]) print("COX2 train dataset: {}".format(dataset_train)) print("COX2 validation dataset: {}".format(dataset_val)) print("COX2 test dataset: {}".format(dataset_test)) ``` ## Transductive Split In the transductive setting, the training /validation / test sets are all over the same graph. As discussed in (LECTURE REF), we consider a transductive setting when we do not need to generalize to new unseen graphs. As an example, here we transductively split the CORA graph for a node level task, such as node classification. (Notice that in DeepSNAP the default split setting is random (i.e. DeepSNAP randomly splits the e.g. nodes into train / val / test); however, you can also use a fixed split by specifying `fixed_split=True` when loading the dataset from PyG or changing the `node_label_index` directly). 
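The note above mentions two ways to get a deterministic split instead of the random one used below: `fixed_split=True` or setting `node_label_index` by hand. As a rough sketch only (the exact return format of `pyg_to_graphs(..., fixed_split=True)` can differ between DeepSNAP versions, and the manual route is just one illustrative assumption), this could look like:

```
if 'IS_GRADESCOPE_ENV' not in os.environ:
    pyg_dataset = Planetoid('./tmp/cora', 'Cora')

    # Option 1 (mentioned above): keep the fixed public split when converting from PyG.
    # Check the DeepSNAP docs for the exact return format of this call.
    # fixed_graphs = GraphDataset.pyg_to_graphs(pyg_dataset, fixed_split=True)

    # Option 2 (illustrative assumption): pin the supervision nodes by hand through
    # node_label_index, using Cora's public train mask provided by PyG.
    graph = GraphDataset.pyg_to_graphs(pyg_dataset)[0]
    graph.node_label_index = pyg_dataset[0].train_mask.nonzero(as_tuple=False).view(-1)
    print("Fixed training nodes:", graph.node_label_index.shape[0])
```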
``` if 'IS_GRADESCOPE_ENV' not in os.environ: root = './tmp/cora' name = 'Cora' pyg_dataset = Planetoid(root, name) graphs = GraphDataset.pyg_to_graphs(pyg_dataset) # Here we specify the task as node-level task such as node classification task = 'node' dataset = GraphDataset(graphs, task=task) # Specify we want the transductive splitting dataset_train, dataset_val, dataset_test = dataset.split(transductive=True, split_ratio=[0.8, 0.1, 0.1]) print("Cora train dataset: {}".format(dataset_train)) print("Cora validation dataset: {}".format(dataset_val)) print("Cora test dataset: {}".format(dataset_test)) print("Original Cora has {} nodes".format(dataset.num_nodes[0])) # The nodes in each set can be find in node_label_index print("After the split, Cora has {} training nodes".format(dataset_train[0].node_label_index.shape[0])) print("After the split, Cora has {} validation nodes".format(dataset_val[0].node_label_index.shape[0])) print("After the split, Cora has {} test nodes".format(dataset_test[0].node_label_index.shape[0])) ``` ## Edge Level Split Compared to node and graph level splitting, edge level splitting is a little bit tricky ;) For edge level splitting we need to consider several different tasks: 1. Splitting positive edges into train / val / test datasets. 2. Sampling / re-sampling negative edges (i.e. edges not present in the graph). 3. Splitting edges into message passing and supervision edges. With regard to point 3, for edge level data splitting we classify edges into two types. The first is `message passing` edges, edges that are used for message passing by our GNN. The second is `supervision`, edges that are used in the loss function for backpropagation. DeepSNAP allows for two different modes, where the `message passing` and `supervision` edges are either the same or disjoint. ### All Edge Splitting Mode First, we explore the `edge_train_mode="all"` mode for edge level splitting, where the `message passing` and `supervision` edges are shared during training. ``` if 'IS_GRADESCOPE_ENV' not in os.environ: root = './tmp/cora' name = 'Cora' pyg_dataset = Planetoid(root, name) graphs = GraphDataset.pyg_to_graphs(pyg_dataset) # Specify task as link_pred for edge-level task task = 'link_pred' # Specify the train mode, "all" mode is default for deepsnap dataset edge_train_mode = "all" dataset = GraphDataset(graphs, task=task, edge_train_mode=edge_train_mode) # Transductive link prediction split dataset_train, dataset_val, dataset_test = dataset.split(transductive=True, split_ratio=[0.8, 0.1, 0.1]) print("Cora train dataset: {}".format(dataset_train)) print("Cora validation dataset: {}".format(dataset_val)) print("Cora test dataset: {}".format(dataset_test)) ``` In DeepSNAP, the indices of supervision edges are stored in the `edge_label_index` tensor and the corresponding edge labels are stored in `edge_label` tensor. 
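For instance, a quick peek at the first few supervision edges of the training split and their labels could look like this (a sketch; positive supervision edges come first, followed by the sampled negatives, as exploited in Question 3 below; depending on the DeepSNAP version, `edge_label` may only be fully materialized when batching through the DataLoader):

```
if 'IS_GRADESCOPE_ENV' not in os.environ:
    train_graph = dataset_train[0]
    print(train_graph.edge_label_index[:, :5])   # first 5 supervision edges (source/target node indices)
    print(train_graph.edge_label[:5])            # their labels (positives first, negatives after)
```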
``` if 'IS_GRADESCOPE_ENV' not in os.environ: print("Original Cora graph has {} edges".format(dataset[0].num_edges)) print() print("Train set has {} message passing edge".format(dataset_train[0].edge_index.shape[1] // 2)) print("Train set has {} supervision (positive) edges".format(dataset_train[0].edge_label_index.shape[1] // 4)) print() print("Validation set has {} message passing edge".format(dataset_val[0].edge_index.shape[1] // 2)) print("Validation set has {} supervision (positive) edges".format(dataset_val[0].edge_label_index.shape[1] // 4)) print() print("Test set has {} message passing edge".format(dataset_test[0].edge_index.shape[1] // 2)) print("Test set has {} supervision (positive) edges".format(dataset_test[0].edge_label_index.shape[1] // 4)) ``` **Specific things to note in `all` mode**: * At training time: the supervision edges are the same as the training message passing edges. * At validation time: the message passing edges are the training message passing edges and training supervision edges (still the training message passing edges in this case). However, we now include a set of unseen validation supervision edges that are disjoint from the training supervision edges. * At test time: the message passing edges are the union of training message passing edges, training supervision edges, and validation supervision edges. The test supervision edges then disjoint from the training supervision edges and validation supervision edges. * We exclude negative edges in this illustration. However, the attributes `edge_label` and `edge_label_index` naturally also include the negative supervision edges (by default the number of negative edges is the same as the number of positive edges, hence the divide by 4 above). Now, that we have seen the basics of the `all` method for edge splitting, we will implement a function that checks whether two edge index tensors are disjoint and explore more edge splitting properties by using that function. ## Question 3: Implement a function that checks whether two edge_index tensors are disjoint (i.e. do not share any common edges). Then answer the True/False questions below. (5 points) ``` def edge_indices_disjoint(edge_index_1, edge_index_2): # TODO: Implement this function that takes two edge index tensors, # and returns whether these two edge index tensors are disjoint. disjoint = None ############# Your code here ############ ## (~5 lines of code) ## Note ## 1. Here disjoint means that there is no single edge belongs to both edge index tensors ## 2. You do not need to consider the undirected case. For example, if edge_index_1 contains ## edge (a, b) and edge_index_2 contains edge (b, a). We will treat them as disjoint in this ## function. 
edge_index_1_np = edge_index_1.T.detach().cpu().numpy() edge_index_2_np = edge_index_2.T.detach().cpu().numpy() intercept = [x for x in set(tuple(x) for x in edge_index_1_np) & set(tuple(x) for x in edge_index_2_np)] disjoint = len(intercept) == 0 ######################################### return disjoint if 'IS_GRADESCOPE_ENV' not in os.environ: num_train_edges = dataset_train[0].edge_label_index.shape[1] // 2 train_pos_edge_index = dataset_train[0].edge_label_index[:, :num_train_edges] train_neg_edge_index = dataset_train[0].edge_label_index[:, num_train_edges:] print("3.1 Training (supervision) positve and negative edges are disjoint = {}"\ .format(edge_indices_disjoint(train_pos_edge_index, train_neg_edge_index))) num_val_edges = dataset_val[0].edge_label_index.shape[1] // 2 val_pos_edge_index = dataset_val[0].edge_label_index[:, :num_val_edges] val_neg_edge_index = dataset_val[0].edge_label_index[:, num_val_edges:] print("3.2 Validation (supervision) positve and negative edges are disjoint = {}"\ .format(edge_indices_disjoint(val_pos_edge_index, val_neg_edge_index))) num_test_edges = dataset_test[0].edge_label_index.shape[1] // 2 test_pos_edge_index = dataset_test[0].edge_label_index[:, :num_test_edges] test_neg_edge_index = dataset_test[0].edge_label_index[:, num_test_edges:] print("3.3 Test (supervision) positve and negative edges are disjoint = {}"\ .format(edge_indices_disjoint(test_pos_edge_index, test_neg_edge_index))) print("3.4 Test (supervision) positve and validation (supervision) positve edges are disjoint = {}"\ .format(edge_indices_disjoint(test_pos_edge_index, val_pos_edge_index))) print("3.5 Validation (supervision) positve and training (supervision) positve edges are disjoint = {}"\ .format(edge_indices_disjoint(val_pos_edge_index, train_pos_edge_index))) ``` ### Disjoint Edge Splitting Mode Now we will look at a relatively more complex transductive edge split setting, the `edge_train_mode="disjoint"` mode in DeepSNAP. In this setting, the `message passing` and `supervision` edges are completely disjoint ``` if 'IS_GRADESCOPE_ENV' not in os.environ: edge_train_mode = "disjoint" dataset = GraphDataset(graphs, task='link_pred', edge_train_mode=edge_train_mode) orig_edge_index = dataset[0].edge_index dataset_train, dataset_val, dataset_test = dataset.split( transductive=True, split_ratio=[0.8, 0.1, 0.1]) train_message_edge_index = dataset_train[0].edge_index train_sup_edge_index = dataset_train[0].edge_label_index val_message_edge_index = dataset_val[0].edge_index val_sup_edge_index = dataset_val[0].edge_label_index test_message_edge_index = dataset_test[0].edge_index test_sup_edge_index = dataset_test[0].edge_label_index print("Original Cora graph has {} edges".format(dataset[0].num_edges)) print() print("Train set has {} message passing edge".format(train_message_edge_index.shape[1] // 2)) print("Train set has {} supervision (positive) edges".format(train_sup_edge_index.shape[1] // 4)) print() print("Validation set has {} message passing edge".format(val_message_edge_index.shape[1] // 2)) print("Validation set has {} supervision (positive) edges".format(val_sup_edge_index.shape[1] // 4)) print() print("Test set has {} message passing edge".format(test_message_edge_index.shape[1] // 2)) print("Test set has {} supervision (positive) edges".format(test_sup_edge_index.shape[1] // 4)) ``` **Specific things to note in `disjoint` mode**: * At training time: the training supervision edges are disjoint from the training message passing edges. 
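As a sanity check with the function from Question 3 (a sketch): in `all` mode the training supervision edges are reused as training message passing edges, so the check below on the current split should print `False`; after re-splitting with `edge_train_mode="disjoint"` in the next cell, the corresponding check on the new split should print `True`.

```
if 'IS_GRADESCOPE_ENV' not in os.environ:
    # Still the `all`-mode split from above: supervision positives overlap with edge_index.
    train_graph = dataset_train[0]
    num_pos = train_graph.edge_label_index.shape[1] // 2
    train_pos_sup = train_graph.edge_label_index[:, :num_pos]
    print(edge_indices_disjoint(train_graph.edge_index, train_pos_sup))  # expected: False
```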
* At validation time: the message passing edges are the union of training message passing edges and training supervision edges. The validation supervision edges are disjoint from both the training message passing and supervision edges. * At test time: the message passing edges are the training message passing edges, training supervision edges, and validation supervision edges. The test supervision edges are disjoint from all the training and validation edges. ## Negative Edges For edge level tasks, sampling negative edges is critical. Moreover, during each training iteration, we want to resample the negative edges. Below we print the training and validation sets negative edges in two training iterations. What we demonstrate is that the negative edges are only resampled during training. ``` if 'IS_GRADESCOPE_ENV' not in os.environ: dataset = GraphDataset(graphs, task='link_pred', edge_train_mode="disjoint") datasets = {} follow_batch = [] datasets['train'], datasets['val'], datasets['test'] = dataset.split( transductive=True, split_ratio=[0.8, 0.1, 0.1]) dataloaders = { split: DataLoader( ds, collate_fn=Batch.collate(follow_batch), batch_size=1, shuffle=(split=='train') ) for split, ds in datasets.items() } neg_edges_1 = None for batch in dataloaders['train']: num_edges = batch.edge_label_index.shape[1] // 2 neg_edges_1 = batch.edge_label_index[:, num_edges:] print("First iteration training negative edges:") print(neg_edges_1) break neg_edges_2 = None for batch in dataloaders['train']: num_edges = batch.edge_label_index.shape[1] // 2 neg_edges_2 = batch.edge_label_index[:, num_edges:] print("Second iteration training negative edges:") print(neg_edges_2) break neg_edges_1 = None for batch in dataloaders['val']: num_edges = batch.edge_label_index.shape[1] // 2 neg_edges_1 = batch.edge_label_index[:, num_edges:] print("First iteration validation negative edges:") print(neg_edges_1) break neg_edges_2 = None for batch in dataloaders['val']: num_edges = batch.edge_label_index.shape[1] // 2 neg_edges_2 = batch.edge_label_index[:, num_edges:] print("Second iteration validation negative edges:") print(neg_edges_2) break ``` If you are interested in more graph splitting settings, please refer to the DeepSNAP dataset [documentation](https://snap.stanford.edu/deepsnap/modules/dataset.html). ## Graph Transformation and Feature Computation The other core functionality of DeepSNAP is graph transformation / feature computation. In DeepSNAP, we divide graph transformation / feature computation into two different types. The first includes transformations before training (e.g. transform the whole dataset before training directly), and the second includes transformations during training (transform batches of graphs). Below is an example that uses the NetworkX back end to calculate the PageRank value for each node and subsequently transforms the node features by concatenating each nodes PageRank score (transform the dataset before training). 
``` def pagerank_transform_fn(graph): # Get the referenced networkx graph G = graph.G # Calculate the pagerank by using networkx pr = nx.pagerank(G) # Transform the pagerank values to tensor pr_feature = torch.tensor([pr[node] for node in range(graph.num_nodes)], dtype=torch.float32) pr_feature = pr_feature.view(graph.num_nodes, 1) # Concat the pagerank values to the node feature graph.node_feature = torch.cat([graph.node_feature, pr_feature], dim=-1) if 'IS_GRADESCOPE_ENV' not in os.environ: root = './tmp/cox2' name = 'COX2' pyg_dataset = TUDataset(root, name) graphs = GraphDataset.pyg_to_graphs(pyg_dataset) dataset = GraphDataset(graphs, task='graph') print("Number of features before transformation: {}".format(dataset.num_node_features)) dataset.apply_transform(pagerank_transform_fn, update_tensor=False) print("Number of features after transformation: {}".format(dataset.num_node_features)) ``` ## Question 4: Implement a transformation that adds the clustering coefficient of each node to its feature vector and then report the clustering coefficient of the node with index 3 in the graph with index 406 (5 points). ``` def cluster_transform_fn(graph): # TODO: Implement a function that takes an deepsnap graph object and # transform the graph by adding each node's clustering coefficient to its # graph.node_feature representation ############# Your code here ############ ## (~5 lines of code) ## Note ## 1. Compute the clustering coefficient value for each node and ## concat this value to the last dimension of graph.node_feature # Get networkx graph G = graph.G # Calculate clustering coefficient/pagerank using networkx pr = nx.algorithms.cluster.clustering(G) # Transform pagerank value to tensor pr_feature = torch.tensor([pr[node] for node in range(graph.num_nodes)], dtype=torch.float16) pr_feature = pr_feature.view(graph.num_nodes, 1) # concat pagerank values to the node features graph.node_feature = torch.cat([graph.node_feature, pr_feature], dim=-1) ######################################### if 'IS_GRADESCOPE_ENV' not in os.environ: root = './cox2' name = 'COX2' pyg_dataset = TUDataset(root, name) graphs = GraphDataset.pyg_to_graphs(pyg_dataset) dataset = GraphDataset(graphs, task='graph') # Transform the dataset dataset.apply_transform(cluster_transform_fn, update_tensor=False) node_idx = 3 graph_idx = 406 node_feature = dataset[graph_idx].node_feature print("The node has clustering coefficient: {}".format(round(node_feature[node_idx][-1].item(), 2))) ``` ### Final Thoughts Apart from transforming the whole dataset before training, DeepSNAP can also transform the graph (usually sampled batches of graphs, `deepsnap.batch.Batch`) during each training iteration. Also, DeepSNAP supports the synchronization of the transformation between the referenced graph objects and tensor representations. For example, you can just update the NetworkX graph object in the transform function and by specifying `update_tensor=True` the internal tensor representations will be automatically updated! For more information, please refer to the DeepSNAP [documentation](https://snap.stanford.edu/deepsnap/). # 4) Edge Level Prediction From the last section, we learned how DeepSNAP trandsuctively splits edges for edge level tasks. For the last part of the notebook, we will use DeepSNAP and PyG together to implement a simple edge level prediction (link prediction) model! 
Specifically, we will use a 2 layer GraphSAGE embedding model to generate node embeddings, and then compute link predictions through a dot product link prediction head. Namely, given an edge (u, v) with GNN feature embeddings $f_u$ and $f_v$, our link prediction head generates its link prediction as $f_u \cdot f_v$. To give a brief intuition for this dot product link prediction model, we are learning a GNN that embedds nodes such that nodes that have an edge in the graph are closer within the embedding space than nodes that do not have an edge. The dot product provides a proxy for closeness in our embedding space where a high positive dot product indicates that two vectors are more closely aligned (the angle between the vectors is small), whereas a negative dot-product indicates that vectors are unaligned (the angle between the vectors is greater than 90). ``` import copy import torch import numpy as np import networkx as nx import matplotlib.pyplot as plt from deepsnap.graph import Graph from deepsnap.batch import Batch from deepsnap.dataset import GraphDataset from torch_geometric.datasets import Planetoid, TUDataset from torch.utils.data import DataLoader import torch.nn.functional as F from torch_geometric.nn import SAGEConv class LinkPredModel(torch.nn.Module): def __init__(self, input_dim, hidden_dim, num_classes, dropout=0.2): super(LinkPredModel, self).__init__() self.conv1 = SAGEConv(input_dim, hidden_dim) self.conv2 = SAGEConv(hidden_dim, num_classes) self.loss_fn = None ############# Your code here ############# ## (~1 line of code) ## Note ## 1. Initialize the loss function to BCEWithLogitsLoss self.loss_fn = nn.BCEWithLogitsLoss() ########################################## self.dropout = dropout def reset_parameters(self): self.conv1.reset_parameters() self.conv2.reset_parameters() def forward(self, batch): node_feature, edge_index, edge_label_index = batch.node_feature, batch.edge_index, batch.edge_label_index ############# Your code here ############# ## (~6 line of code) ## Note ## 1. Feed the node feature into the first conv layer ## 2. Add a ReLU after the first conv layer ## 3. Add dropout after the ReLU (with probability self.dropout) ## 4. Feed the output to the second conv layer ## 5. Select the embeddings of the source nodes and destination nodes ## by using the edge_label_index and compute the similarity of each pair ## by dot product x = self.conv1(node_feature, edge_index) x = F.relu(x) x = F.dropout(x, p=self.dropout) x = self.conv2(x, edge_index) x_src = x[edge_label_index[0]] x_dst = x[edge_label_index[1]] x_similarity = x_src * x_dst pred = torch.sum(x_similarity, dim=-1) ########################################## return pred def loss(self, pred, link_label): return self.loss_fn(pred, link_label) from sklearn.metrics import * def train(model, dataloaders, optimizer, args): val_max = 0 best_model = model for epoch in range(1, args["epochs"]): for i, batch in enumerate(dataloaders['train']): batch.to(args["device"]) ############# Your code here ############# ## (~6 lines of code) ## Note ## 1. Zero grad the optimizer ## 2. Compute loss and backpropagate ## 3. 
Update the model parameters optimizer.zero_grad() pred = model(batch) loss = model.loss(pred, batch.edge_label.type_as(pred)) loss.backward() optimizer.step() ########################################## log = 'Epoch: {:03d}, Train: {:.4f}, Val: {:.4f}, Test: {:.4f}, Loss: {}' score_train = test(model, dataloaders['train'], args) score_val = test(model, dataloaders['val'], args) score_test = test(model, dataloaders['test'], args) print(log.format(epoch, score_train, score_val, score_test, loss.item())) if val_max < score_val: val_max = score_val best_model = copy.deepcopy(model) return best_model def test(model, dataloader, args, save_model_preds=False): model.eval() score = 0 preds = None labels = None ############# Your code here ############# ## (~7 lines of code) ## Note ## 1. Loop through batches in the dataloader (Note for us there is only one batch!) ## 2. Feed the batch to the model ## 3. Feed the model output to sigmoid ## 4. Compute the ROC-AUC score by using sklearn roc_auc_score function ## Note: Look into flattening and converting torch tensors into numpy arrays ## 5. Edge labels are stored in batch.edge_label ## 6. Make sure to save your **numpy** model predictions as 'preds' ## and the **numpy** edge labels as 'labels' for batch in dataloader: batch.to(args['device']) preds = model(batch) preds = torch.sigmoid(preds).cpu().detach().numpy() labels = batch.edge_label.cpu().detach().numpy() score += roc_auc_score(labels, preds) score /= len(dataloader) ########################################## if save_model_preds: print ("Saving Link Classification Model Predictions") print() data = {} data['pred'] = preds data['label'] = labels df = pd.DataFrame(data=data) # Save locally as csv df.to_csv('CORA-Link-Prediction.csv', sep=',', index=False) return score # Please don't change any parameters args = { "device" : 'cuda' if torch.cuda.is_available() else 'cpu', "hidden_dim" : 128, "epochs" : 200, } if 'IS_GRADESCOPE_ENV' not in os.environ: pyg_dataset = Planetoid('./tmp/cora', 'Cora') graphs = GraphDataset.pyg_to_graphs(pyg_dataset) dataset = GraphDataset( graphs, task='link_pred', edge_train_mode="disjoint" ) datasets = {} datasets['train'], datasets['val'], datasets['test']= dataset.split( transductive=True, split_ratio=[0.85, 0.05, 0.1]) input_dim = datasets['train'].num_node_features num_classes = datasets['train'].num_edge_labels model = LinkPredModel(input_dim, args["hidden_dim"], num_classes).to(args["device"]) model.reset_parameters() optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4) dataloaders = {split: DataLoader( ds, collate_fn=Batch.collate([]), batch_size=1, shuffle=(split=='train')) for split, ds in datasets.items()} best_model = train(model, dataloaders, optimizer, args) log = "Best Model Accuracies Train: {:.4f}, Val: {:.4f}, Test: {:.4f}" best_train_roc = test(best_model, dataloaders['train'], args) best_val_roc = test(best_model, dataloaders['val'], args) best_test_roc = test(best_model, dataloaders['test'], args, save_model_preds=True) print(log.format(best_train_roc, best_val_roc, best_test_roc)) ``` ## Question 4: What is the maximum ROC-AUC score you get for your best_model on the test set? (13 points) After training your model, download and submit your best model prediction file: *CORA-Link-Prediction.csv*. As we have seen before you can view this file by clicking on the *Folder* icon on the left side panel.
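As an optional sanity check before submitting (a sketch, not required by the assignment), the saved predictions can be reloaded and the test ROC-AUC recomputed directly from the CSV written by `test(..., save_model_preds=True)`:

```
if 'IS_GRADESCOPE_ENV' not in os.environ:
    import pandas as pd
    from sklearn.metrics import roc_auc_score

    saved = pd.read_csv('CORA-Link-Prediction.csv')   # columns written above: 'pred', 'label'
    print("Rows saved:", len(saved))
    print("ROC-AUC recomputed from file:", roc_auc_score(saved['label'], saved['pred']))
```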
# Submission You will need to submit four files on Gradescope to complete this notebook. 1. Your completed *XCS224W_Colab3.ipynb*. From the "File" menu select "Download .ipynb" to save a local copy of your completed Colab. 2. *CORA-Node-GraphSage.csv* 3. *CORA-Node-GAT.csv* 4. *CORA-Link-Prediction.csv* Download the csv files by selecting the *Folder* icon on the left panel. To submit your work, zip the files downloaded in steps 1-4 above and submit to gradescope. **NOTE:** DO NOT rename any of the downloaded files. ``` ```
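If you prefer to collect the CSV files from within Colab, a small helper cell like the following can archive and download them in one step (a sketch; the completed *XCS224W_Colab3.ipynb* still has to be downloaded via the "File" menu and added to the zip locally):

```
# Sketch: bundle the three prediction files produced above into one archive.
!zip -j colab3_csvs.zip CORA-Node-GraphSage.csv CORA-Node-GAT.csv CORA-Link-Prediction.csv

from google.colab import files
files.download('colab3_csvs.zip')
```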
github_jupyter
``` !python3 -m pip freeze | grep xlrd !python3 -m pip freeze | grep openpy ``` # Using the pandas library to analyze vulnerability descriptions from the FSTEC data bank This article demonstrates how the pandas library can be used to work with the information published in the FSTEC data bank (bdu.fstec.ru) about threats (thrlist.xlsx) and vulnerabilities (vullist.xlsx). The work can be done in the free Google Colab environment, where the required packages are already preinstalled: pandas, xlrd, openpyxl. ## Table of contents <a name='toc'></a> <ul> <a href='#load'>Downloading the files from the site</a> </ul> <ul> <a href='#thrlist'>Analyzing the threats file thrlist.xlsx</a> </ul> <ul> <a href='#vullist'>Analyzing the vulnerabilities file vullist.xlsx</a> </ul> <ul> <a href='#refs'>References</a> </ul> ## Downloading the files from the site <a name='load'></a> The official FSTEC site publishes the lists of threats (thrlist.xlsx) and vulnerabilities (vullist.xlsx) as Excel files. Let us download them. ``` !wget https://bdu.fstec.ru/files/documents/thrlist.xlsx !wget https://bdu.fstec.ru/files/documents/vullist.xlsx ``` Let us make sure the files have appeared in the local directory. For this we can call the standard Linux command for listing a directory, ls; to run it from a notebook cell, prefix it with "!". ``` !ls ``` Load the pandas library for working with tables. ``` import pandas as pd ``` Create a DataFrame from the xlsx file (we skip one row so that the headers are read correctly). ``` df = pd.read_excel("./thrlist.xlsx", skiprows=1, engine="openpyxl") ``` Display the first three rows of the DataFrame. ``` df.head(3) ``` Check its dimensions. ``` df.shape ``` List the column names. ``` for i,col in enumerate(df.columns): print(i+1,":",col) ``` So, we have downloaded the threat and vulnerability descriptions from the official FSTEC site as xlsx files and built pandas.DataFrame tables from their contents. Next we will analyze the contents of these tables with pandas methods. <a href='#toc'>Back to table of contents</a> ## Analyzing the threats file thrlist.xlsx <a name='thrlist'></a> Let us look at the information about the table columns. ``` df.info() ``` From this summary we see that the table has 10 columns: four of them are integer, two have the "datetime" type, and the rest are strings. We also see that the "Источник угрозы" (threat source) column contains two missing values (NaN). Show only the rows where the threat is related to an integrity violation. ``` df[df['Нарушение целостности']==1] ``` Show only the rows related to BIOS. ``` df[df['Наименование УБИ'].apply(lambda x: x.find("BIOS"))!=-1] ``` Show the threats added to the data bank in 2019. ``` df[(df['Дата включения угрозы в БнД УБИ']>'2019-01-01')&(df['Дата включения угрозы в БнД УБИ']<'2020-01-01')] ``` When we try to filter the table, keeping only the rows that contain the word "высоким" ("high") in the column "Источник угрозы (характеристика и потенциал нарушителя)" (threat source: attacker profile and potential), an error occurs. The reason is that two cells of this column contained missing values (NaN, "Not a Number", the special marker for an empty value). Below we detect them and replace the missing values with the string "не задано" ("not specified"). ``` df['Источник угрозы (характеристика и потенциал нарушителя)'].isna().sum() df['Источник угрозы (характеристика и потенциал нарушителя)'].fillna("не задано", inplace=True) df['Источник угрозы (характеристика и потенциал нарушителя)'].isna().sum() ``` Now the filter works; let us keep only the rows with a high attacker potential.
``` df[df['Источник угрозы (характеристика и потенциал нарушителя)'].str.contains("высоким")] ``` A compound condition: 1) "BIOS" appears in the threat name column ("Наименование УБИ") and 2) the threat violates confidentiality ("Нарушение конфиденциальности"). ``` df_result = df[(df['Наименование УБИ'].apply(lambda x: x.find("BIOS"))!=-1)&(df['Нарушение конфиденциальности']==1)] df_result ``` Write the result to an xls file. ``` df_result.to_excel("name.xls") !ls ``` Save the file from the Google Colab virtual disk to the local disk. ``` from google.colab import files files.download("name.xls") !ls ``` <a href="#toc">Back to table of contents</a> ## Analyzing the vulnerability descriptions file vullist.xlsx <a name='vullist'></a> ``` df2 = pd.read_excel("vullist.xlsx", skiprows=2) df2 df2.shape df2.columns df2[df2['Вендор ПО'].apply(lambda x:x.find("D-Link"))!=-1] df2['Вендор ПО'].unique().shape df2['Вендор ПО'].value_counts()[:10].plot(kind='bar') import matplotlib.pyplot as plt plt.bar(x=range(10),height=df2['Вендор ПО'].value_counts()[:10]) plt.show() df2[df2['Наименование уязвимости'].apply(lambda x:x.find("облач"))!=-1].shape ``` ## References <a name='refs'></a> - https://bdu.fstec.ru - https://pandas.pydata.org - https://github.com/yurichernyshov/Data-Science-Course-USURT/blob/master/lessons/100%20questions%20Pandas.ipynb <a href='#toc'>Back to table of contents</a>
github_jupyter
<i>Copyright (c) Microsoft Corporation. All rights reserved.</i> <i>Licensed under the MIT License.</i> # LightGBM: A Highly Efficient Gradient Boosting Decision Tree This notebook will give you an example of how to train a LightGBM model to estimate click-through rates on an e-commerce advertisement. We will train a LightGBM based model on the Criteo dataset. [LightGBM](https://github.com/Microsoft/LightGBM) is a gradient boosting framework that uses tree-based learning algorithms. It is designed to be distributed and efficient with the following advantages: * Fast training speed and high efficiency. * Low memory usage. * Great accuracy. * Support of parallel and GPU learning. * Capable of handling large-scale data. ## Global Settings and Imports ``` import sys, os sys.path.append("../../") import numpy as np import lightgbm as lgb import papermill as pm import pandas as pd import category_encoders as ce from tempfile import TemporaryDirectory from sklearn.metrics import roc_auc_score, log_loss import reco_utils.recommender.lightgbm.lightgbm_utils as lgb_utils import reco_utils.dataset.criteo as criteo print("System version: {}".format(sys.version)) print("LightGBM version: {}".format(lgb.__version__)) ``` ### Parameter Setting Let's set the main related parameters for LightGBM now. Basically, the task is a binary classification (predicting click or no click), so the objective function is set to binary logloss, and 'AUC' metric, is used as a metric which is less effected by imbalance in the classes of the dataset. Generally, we can adjust the number of leaves (MAX_LEAF), the minimum number of data in each leaf (MIN_DATA), maximum number of trees (NUM_OF_TREES), the learning rate of trees (TREE_LEARNING_RATE) and EARLY_STOPPING_ROUNDS (to avoid overfitting) in the model to get better performance. Besides, we can also adjust some other listed parameters to optimize the results. [In this link](https://github.com/Microsoft/LightGBM/blob/master/docs/Parameters.rst), a list of all the parameters is shown. Also, some advice on how to tune these parameters can be found [in this url](https://github.com/Microsoft/LightGBM/blob/master/docs/Parameters-Tuning.rst). ``` MAX_LEAF = 64 MIN_DATA = 20 NUM_OF_TREES = 100 TREE_LEARNING_RATE = 0.15 EARLY_STOPPING_ROUNDS = 20 METRIC = "auc" SIZE = "sample" params = { 'task': 'train', 'boosting_type': 'gbdt', 'num_class': 1, 'objective': "binary", 'metric': METRIC, 'num_leaves': MAX_LEAF, 'min_data': MIN_DATA, 'boost_from_average': True, #set it according to your cpu cores. 'num_threads': 20, 'feature_fraction': 0.8, 'learning_rate': TREE_LEARNING_RATE, } ``` ## Data Preparation Here we use CSV format as the example data input. Our example data is a sample (about 100 thousand samples) from [Criteo dataset](https://www.kaggle.com/c/criteo-display-ad-challenge). The Criteo dataset is a well-known industry benchmarking dataset for developing CTR prediction models, and it's frequently adopted as evaluation dataset by research papers. The original dataset is too large for a lightweight demo, so we sample a small portion from it as a demo dataset. Specifically, there are 39 columns of features in Criteo, where 13 columns are numerical features (I1-I13) and the other 26 columns are categorical features (C1-C26). 
``` nume_cols = ["I" + str(i) for i in range(1, 14)] cate_cols = ["C" + str(i) for i in range(1, 27)] label_col = "Label" header = [label_col] + nume_cols + cate_cols with TemporaryDirectory() as tmp: all_data = criteo.load_pandas_df(size=SIZE, local_cache_path=tmp, header=header) display(all_data.head()) ``` First, we cut three sets (train_data (first 80%), valid_data (middle 10%) and test_data (last 10%)), cut from the original all data. <br> Notably, considering the Criteo is a kind of time-series streaming data, which is also very common in recommendation scenario, we split the data by its order. ``` # split data to 3 sets length = len(all_data) train_data = all_data.loc[:0.8*length-1] valid_data = all_data.loc[0.8*length:0.9*length-1] test_data = all_data.loc[0.9*length:] ``` ## Basic Usage ### Ordinal Encoding Considering LightGBM could handle the low-frequency features and missing value by itself, for basic usage, we only encode the string-like categorical features by an ordinal encoder. ``` ord_encoder = ce.ordinal.OrdinalEncoder(cols=cate_cols) def encode_csv(df, encoder, label_col, typ='fit'): if typ == 'fit': df = encoder.fit_transform(df) else: df = encoder.transform(df) y = df[label_col].values del df[label_col] return df, y train_x, train_y = encode_csv(train_data, ord_encoder, label_col) valid_x, valid_y = encode_csv(valid_data, ord_encoder, label_col, 'transform') test_x, test_y = encode_csv(test_data, ord_encoder, label_col, 'transform') print('Train Data Shape: X: {trn_x_shape}; Y: {trn_y_shape}.\nValid Data Shape: X: {vld_x_shape}; Y: {vld_y_shape}.\nTest Data Shape: X: {tst_x_shape}; Y: {tst_y_shape}.\n' .format(trn_x_shape=train_x.shape, trn_y_shape=train_y.shape, vld_x_shape=valid_x.shape, vld_y_shape=valid_y.shape, tst_x_shape=test_x.shape, tst_y_shape=test_y.shape,)) train_x.head() ``` ### Create model When both hyper-parameters and data are ready, we can create a model: ``` lgb_train = lgb.Dataset(train_x, train_y.reshape(-1), params=params, categorical_feature=cate_cols) lgb_valid = lgb.Dataset(valid_x, valid_y.reshape(-1), reference=lgb_train, categorical_feature=cate_cols) lgb_test = lgb.Dataset(test_x, test_y.reshape(-1), reference=lgb_train, categorical_feature=cate_cols) lgb_model = lgb.train(params, lgb_train, num_boost_round=NUM_OF_TREES, early_stopping_rounds=EARLY_STOPPING_ROUNDS, valid_sets=lgb_valid, categorical_feature=cate_cols) ``` Now let's see what is the model's performance: ``` test_preds = lgb_model.predict(test_x) auc = roc_auc_score(np.asarray(test_y.reshape(-1)), np.asarray(test_preds)) logloss = log_loss(np.asarray(test_y.reshape(-1)), np.asarray(test_preds), eps=1e-12) res_basic = {"auc": auc, "logloss": logloss} print(res_basic) pm.record("res_basic", res_basic) ``` <script type="text/javascript" src="http://cdn.mathjax.org/mathjax/latest/MathJax.js?config=default"></script> ## Optimized Usage ### Label-encoding and Binary-encoding Next, since LightGBM has a better capability in handling dense numerical features effectively, we try to convert all the categorical features in original data into numerical ones, by label-encoding [3] and binary-encoding [4]. Also due to the sequence property of Criteo, the label-encoding we adopted is executed one-by-one, which means we encode the samples in order, by the information of the previous samples before each sample (sequential label-encoding and sequential count-encoding). 
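As a toy illustration of what "sequential" means here (a sketch with a made-up toy column, not the `lgb_utils.NumEncoder` implementation used below; filling the cold-start rows with 0.0 is just a placeholder assumption), the running label mean and count share seen *before* each row of a single categorical column could be computed like this:

```
import numpy as np
import pandas as pd

toy = pd.DataFrame({"C1": ["a", "b", "a", "a", "b"],
                    "Label": [1, 0, 0, 1, 1]})

prev_cnt = toy.groupby("C1").cumcount()                        # occurrences of this category so far
prev_sum = toy.groupby("C1")["Label"].cumsum() - toy["Label"]  # label sum over those occurrences
n_former = np.arange(len(toy))                                 # number of former samples (i - 1)

toy["label_feat"] = (prev_sum / prev_cnt.replace(0, np.nan)).fillna(0.0)
toy["count_feat"] = (prev_cnt / np.where(n_former == 0, np.nan, n_former))
toy["count_feat"] = toy["count_feat"].fillna(0.0)
print(toy)   # each row only uses information from the rows above it
```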
Besides, we also filter the low-frequency categorical features and fill the missing values by the mean of corresponding columns for the numerical features. (consulting `lgb_utils.NumEncoder`) Specifically, in `lgb_utils.NumEncoder`, the main steps are as follows. * Firstly, we convert the low-frequency categorical features to `"LESS"` and the missing categorical features to `"UNK"`. * Secondly, we convert the missing numerical features into the mean of corresponding columns. * Thirdly, the string-like categorical features are ordinal encoded like the example shown in basic usage. * And then, we target encode the categorical features in the samples order one-by-one. For each sample, we add the label and count information of its former samples into the data and produce new features. Formally, for $i=1,2,...,n$, we add $\frac{\sum\nolimits_{j=1}^{i-1} I(x_j=c) \cdot y}{\sum\nolimits_{j=1}^{i-1} I(x_j=c)}$ as a new label feature for current sample $x_i$, where $c$ is a category to encode in current sample, so $(i-1)$ is the number of former samples, and $I(\cdot)$ is the indicator function that check the former samples contain $c$ (whether $x_j=c$) or not. At the meantime, we also add the count frequency of $c$, which is $\frac{\sum\nolimits_{j=1}^{i-1} I(x_j=c)}{i-1}$, as a new count feature. * Finally, based on the results of ordinal encoding, we add the binary encoding results as new columns into the data. Note that the statistics used in the above process only updates when fitting the training set, while maintaining static when transforming the testing set because the label of test data should be considered as unknown. ``` label_col = 'Label' num_encoder = lgb_utils.NumEncoder(cate_cols, nume_cols, label_col) train_x, train_y = num_encoder.fit_transform(train_data) valid_x, valid_y = num_encoder.transform(valid_data) test_x, test_y = num_encoder.transform(test_data) del num_encoder print('Train Data Shape: X: {trn_x_shape}; Y: {trn_y_shape}.\nValid Data Shape: X: {vld_x_shape}; Y: {vld_y_shape}.\nTest Data Shape: X: {tst_x_shape}; Y: {tst_y_shape}.\n' .format(trn_x_shape=train_x.shape, trn_y_shape=train_y.shape, vld_x_shape=valid_x.shape, vld_y_shape=valid_y.shape, tst_x_shape=test_x.shape, tst_y_shape=test_y.shape,)) ``` ### Training and Evaluation ``` lgb_train = lgb.Dataset(train_x, train_y.reshape(-1), params=params) lgb_valid = lgb.Dataset(valid_x, valid_y.reshape(-1), reference=lgb_train) lgb_model = lgb.train(params, lgb_train, num_boost_round=NUM_OF_TREES, early_stopping_rounds=EARLY_STOPPING_ROUNDS, valid_sets=lgb_valid) test_preds = lgb_model.predict(test_x) auc = roc_auc_score(np.asarray(test_y.reshape(-1)), np.asarray(test_preds)) logloss = log_loss(np.asarray(test_y.reshape(-1)), np.asarray(test_preds), eps=1e-12) res_optim = {"auc": auc, "logloss": logloss} print(res_optim) pm.record("res_optim", res_optim) ``` ## Model saving and loading Now we finish the basic training and testing for LightGBM, next let's try to save and reload the model, and then evaluate it again. 
``` with TemporaryDirectory() as tmp: save_file = os.path.join(tmp, r'finished.model') lgb_model.save_model(save_file) loaded_model = lgb.Booster(model_file=save_file) # eval the performance again test_preds = loaded_model.predict(test_x) auc = roc_auc_score(np.asarray(test_y.reshape(-1)), np.asarray(test_preds)) logloss = log_loss(np.asarray(test_y.reshape(-1)), np.asarray(test_preds), eps=1e-12) print({"auc": auc, "logloss": logloss}) ``` ## Additional Reading \[1\] Guolin Ke, Qi Meng, Thomas Finley, Taifeng Wang, Wei Chen, Weidong Ma, Qiwei Ye, and Tie-Yan Liu. 2017. LightGBM: A highly efficient gradient boosting decision tree. In Advances in Neural Information Processing Systems. 3146–3154.<br> \[2\] The parameters of LightGBM: https://github.com/Microsoft/LightGBM/blob/master/docs/Parameters.rst <br> \[3\] Anna Veronika Dorogush, Vasily Ershov, and Andrey Gulin. 2018. CatBoost: gradient boosting with categorical features support. arXiv preprint arXiv:1810.11363 (2018).<br> \[4\] Scikit-learn. 2018. categorical_encoding. https://github.com/scikit-learn-contrib/categorical-encoding<br>
github_jupyter
### Dataset Lets Load the dataset. We shall use the following datasets: Features are in: "sido0_train.mat" Labels are in: "sido0_train.targets" ``` from scipy.io import loadmat import numpy as np X = loadmat(r"/Users/rkiyer/Desktop/teaching/CS6301/jupyter/data/sido0_matlab/sido0_train.mat") y = np.loadtxt(r"/Users/rkiyer/Desktop/teaching/CS6301/jupyter/data/sido0_matlab/sido0_train.targets") # Statistics of the Dense Format of X X = X['X'].todense() print(X.shape) ``` ### Logistic Regression Definition Lets use the Logistic Regression definition we previously used ``` def LogisticLoss(w, X, y, lam): # Computes the cost function for all the training samples m = X.shape[0] Xw = np.dot(X,w) yT = y.reshape(-1,1) yXw = np.multiply(yT,Xw) f = np.sum(np.logaddexp(0,-yXw)) + 0.5*lam*np.sum(np.multiply(w,w)) gMul = 1/(1 + np.exp(yXw)) ymul = -1*np.multiply(yT, gMul) g = np.dot(ymul.reshape(1,-1),X) + lam*w.reshape(1,-1) g = g.reshape(-1,1) return [f, g] ``` ### Barzelia Borwein step length Lets invoke BB Step Length Gradient Descent ``` from numpy import linalg as LA def gdBB(funObj,w,maxEvals,alpha,gamma,X,y,lam, verbosity, freq): [f,g] = funObj(w,X,y,lam) funEvals = 1 funVals = [] f_old = f g_old = g funVals.append(f) numBackTrack = 0 while(1): wp = w - alpha*g [fp,gp] = funObj(wp,X,y,lam) funVals.append(f) funEvals = funEvals+1 backtrack = 0 if funEvals > 2: g_diff = g - g_old alpha = -alpha*np.dot(g_old.T, g_diff)[0,0]/np.dot(g_diff.T, g_diff)[0,0] while fp > f - gamma*alpha*np.dot(g.T, g): alpha = alpha*alpha*np.dot(g.T, g)[0,0]/(2*(fp + np.dot(g.T, g)[0,0]*alpha - f)) wp = w - alpha*g [fp,gp] = funObj(wp,X,y,lam) funVals.append(f) funEvals = funEvals+1 numBackTrack = numBackTrack + 1 f_old = f g_old = g w = wp f = fp g = gp optCond = LA.norm(g, np.inf) if ((verbosity > 0) and (funEvals % freq == 0)): print(funEvals,alpha,f,optCond) if (optCond < 1e-2): break if (funEvals >= maxEvals): break return (funVals,numBackTrack) [nSamples,nVars] = X.shape w = np.zeros((nVars,1)) (funV1,numBackTrack) = gdBB(LogisticLoss,w,250,1,1e-4,X,y,1,1,10) print(len(funV1)) print("Number of Backtrackings = " + str(numBackTrack)) ``` ### Conjugate Gradient Descent Nonlinear Conjugate Gradient Descent ``` from numpy import linalg as LA def gdCG(funObj,w,maxEvals,alpha,gamma,X,y,lam, verbosity, freq): [f,g] = funObj(w,X,y,lam) funEvals = 1 funVals = [] f_old = f g_old = g funVals.append(f) numBackTrack = 0 d = g while(1): wp = w - alpha*d [fp,gp] = funObj(wp,X,y,lam) funVals.append(f) funEvals = funEvals+1 backtrack = 0 if funEvals > 2: alpha = min(1,2*(f_old - f)/np.dot(g.T, g)[0,0]) beta = np.dot(g.T, g)[0,0]/np.dot(g_old.T, g_old)[0,0] d = g + beta*d else: d = g while fp > f - gamma*alpha*np.dot(g.T, d)[0,0]: alpha = alpha*alpha*np.dot(g.T, d)[0,0]/(2*(fp + np.dot(g.T, d)[0,0]*alpha - f)) wp = w - alpha*d [fp,gp] = funObj(wp,X,y,lam) funVals.append(f) funEvals = funEvals+1 numBackTrack = numBackTrack + 1 f_old = f g_old = g w = wp f = fp g = gp optCond = LA.norm(g, np.inf) if ((verbosity > 0) and (funEvals % freq == 0)): print(funEvals,alpha,f,optCond) if (optCond < 1e-2): break if (funEvals >= maxEvals): break return (funVals,numBackTrack) [nSamples,nVars] = X.shape w = np.zeros((nVars,1)) (funV1,numBackTrack) = gdCG(LogisticLoss,w,250,1,1e-4,X,y,1,1,10) print(len(funV1)) print("Number of Backtrackings = " + str(numBackTrack)) ```
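Since both optimizers above record the objective value at every function evaluation, a natural follow-up is to plot the two traces side by side. The sketch below assumes the outputs of `gdBB` and `gdCG` are stored under separate (hypothetical) names such as `funV_bb` and `funV_cg` instead of both overwriting `funV1`:

```
import matplotlib.pyplot as plt

# e.g.
# (funV_bb, _) = gdBB(LogisticLoss, w, 250, 1, 1e-4, X, y, 1, 0, 10)
# (funV_cg, _) = gdCG(LogisticLoss, w, 250, 1, 1e-4, X, y, 1, 0, 10)
def plot_traces(funV_bb, funV_cg):
    """Compare Barzilai-Borwein and nonlinear CG objective traces."""
    plt.semilogy(funV_bb, label='Barzilai-Borwein step length')
    plt.semilogy(funV_cg, label='Nonlinear conjugate gradient')
    plt.xlabel('function evaluations')
    plt.ylabel('regularized logistic loss (log scale)')
    plt.legend()
    plt.show()
```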
github_jupyter
# Singleton Networks ``` import qualreas as qr import os import copy qr_path = os.path.join(os.getenv('PYPROJ'), 'qualreas') alg_dir = os.path.join(qr_path, "Algebras") ``` ## Make a Test Network ``` test1_net_dict = { 'name': 'Network Copy Test #1', 'algebra': 'Extended_Linear_Interval_Algebra', 'description': 'Testing/Developing network copy functionality', 'nodes': [ ['U', ['ProperInterval', 'Point']], ['V', ['ProperInterval', 'Point']], ['W', ['ProperInterval']], ['X', ['Point']] ], 'edges': [ ['U', 'V', 'B'], ['U', 'W', 'M'], ['W', 'V', 'O'], ['X', 'W', 'D'] ] } test2_net_dict = { 'name': 'Network Copy Test #2', 'algebra': 'Extended_Linear_Interval_Algebra', 'description': 'Testing/Developing network copy functionality', 'nodes': [ ['X', ['ProperInterval']], ['Y', ['ProperInterval']], ['Z', ['ProperInterval']] ], 'edges': [ ['X', 'Y', 'B'], ['Y', 'Z', 'B'] ] } test1_net = qr.Network(algebra_path=alg_dir, network_dict=test1_net_dict) test2_net = qr.Network(algebra_path=alg_dir, network_dict=test2_net_dict) test1_net.propagate() test1_net.summary(show_all=False) test2_net.propagate() test2_net.summary(show_all=False) ``` ## Test Changing Constraint on an Edge Look at all the edge contraints ``` for eg in test1_net.edges: print(test1_net.edges[eg[0], eg[1]]['constraint']) ``` Grab the Head (src) and Tail (tgt) of the 3rd edge, above. ``` src, tgt = list(test1_net.edges)[2] test1_net.edges[src,tgt]['constraint'] ``` Change the constraint and look at the result on the edge & its converse. ``` test1_net.set_constraint(src, tgt, test1_net.algebra.relset('D|M|FI')) test1_net.edges[src,tgt]['constraint'] test1_net.edges[tgt,src]['constraint'] ``` ## Test Copy Network ``` test1_net_copy = test1_net.copy() #test1_net_copy = qr.copy(test1_net) test1_net_copy.summary() test1_net_copy.propagate() test1_net_copy.summary(show_all=False) done = [] result = [] for eg in test1_net_copy.edges: src = eg[0]; tgt = eg[1] srcID = src.name; tgtID = tgt.name if not (src, tgt) in done: cons = test1_net_copy.edges[src, tgt]['constraint'] print(srcID, tgtID, cons) if len(cons) > 1: result.append((srcID, tgtID, cons)) done.append((tgt, src)) rels = [] for rel in result[0][2]: rels.append(rel) rels foo = [1, 2, 3] a = foo.pop() a foo def _all_realizations_aux(in_work, result): if len(in_work) == 0: print("DONE") return result else: print("Get next net in work") next_net = in_work.pop() if finished(next_net): print(" This one's finished") result.append(next_net) _all_realizations_aux(in_work, result) else: print(" Expanding net") _all_realizations_aux(in_work + expand(next_net), result) def expand(net): expansion = [] for src, tgt in net.edges: edge_constraint = net.edges[src, tgt]['constraint'] if len(edge_constraint) > 1: print("--------") print(f"Edge Constraint: {edge_constraint}") for rel in edge_constraint: print(f" Relation: {rel}") net_copy = net.copy() src_node, tgt_node, _ = net_copy.get_edge(src.name, tgt.name, return_names=False) net_copy.set_constraint(src_node, tgt_node, net_copy.algebra.relset(rel)) expansion.append(net_copy) print(f" Expansion: {expansion}") break return expansion def finished(net): """Returns True if all constraints are singletons.""" answer = True for src, tgt in net.edges: edge_constraint = net.edges[src, tgt]['constraint'] if len(edge_constraint) > 1: answer = False break return answer x = _all_realizations_aux([test1_net_copy], list()) len(x) foo = expand(test1_net) foo foo[0].summary(show_all=False) foo[1].summary(show_all=False) foo[2].summary(show_all=False) 
finished(test1_net) finished(test2_net) ```
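A thin top-level wrapper is the natural next step for the enumeration developed above. The sketch below is my addition rather than part of the qualreas API; note that the recursive branches of `_all_realizations_aux` do not `return` their recursive calls, so the wrapper relies on the `result` list being mutated in place instead of on the return value.

```
def all_realizations(net):
    """Enumerate all singleton networks reachable from `net` by splitting every
    multi-relation constraint into its individual relations."""
    result = list()
    _all_realizations_aux([net.copy()], result)  # fills `result` in place
    print(f"Found {len(result)} singleton network(s)")
    return result

# Example usage on the propagated test networks:
# singletons = all_realizations(test1_net)
# for s in singletons:
#     s.propagate()
```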
github_jupyter
``` import pandas import numpy as np import sklearn from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import RandomForestRegressor from sklearn.tree import DecisionTreeClassifier from sklearn.model_selection import train_test_split import glob ``` # San Francisco State University ## Software Engineering Team Assessment and Prediction (SETAP) Project Machine Learning Training Data File Version 0.7 ==================================================================== # Copyright 2000-2017 by San Francisco State University, Dragutin Petkovic, and Marc Sosnick-Perez. # CONTACT ------- ## Professor Dragutin Petkovic: [email protected] # LICENSE ------- This data is released under the Creative Commons Attribution- NonCommercial 4.0 International license. For more information, please see http://creativecommons.org/licenses/by-nc/4.0/legalcode. The research that has made this data possible has been funded in part by NSF grant NSF-TUES1140172. YOUR FEEDBACK IS WELCOME ------------------------ We are interested in how this data is being used. If you use it in a research project, we would like to know how you are using the data. Please contact us at [email protected]. # FILES INCLUDED IN DISTRIBUTION PACKAGE ================================== More data about the SETAP project, data collection, and description and use of machine learning to analyze the data can be found in the following paper: D. Petkovic, M. Sosnick-Perez, K. Okada, R. Todtenhoefer, S. Huang, N. Miglani, A. Vigil: "Using the Random Forest Classifier to Assess and Predict Student Learning of Software Engineering Teamwork". Frontiers in Education FIE 2016, Erie, PA, 2016 See DATA DESCRIPTION below for more information about the data. The README file (which you are reading) contains project information such as data collection techniques, data organization and field naming convention. In addition to the README file, the archive contains a number of .csv files. Each of these CSV files contains data aggregated by team from the project (see below), paired with that team's outcome for either the process or product component of the team's evaluation. The files are named using the following convention: setap[Process|Product]T[1-11].csv For example, the file setapProcessT5.csv contains the data for all teams for time interval 5, paired with the outcome data for the Process component of the team's evaluation. Detailed information about the exact format of the .csv file may be found in the csv files themselves. # DATA DESCRIPTION ==================================================================== The following is a detailed description of the data contained in the accompanying files. ### INTRODUCTION ------------ The data contained in these files were collected over a period of several semesters from students engaged in software engineering classes at San Francisco State University (class sections of CSC 640, CSC 648 and CSC 848). All students consented to this data being shared for research purposes provided no uniquely identifiable information was contained in the distributed files. The information was collected through various means, with emphasis being placed on the collection of objective, quantifiable information. For more information on the data collection procedures, please see the paper referenced above. ### PRIVACY ------- The data contained in this file does not contain any information which may be individually traced to a particular student who participated in the study. 
# BRIEF DESCRIPTION OF DATA SOURCES AND DERIVATIONS ------------------------------------------------- SAMs (Student Activity Measure) are collected for each student team member during their participation in a software engineering class. Student teams work together on a final class project, and comprise 5-6 students. Teams that are made up of students from only one school are labeled local teams. Teams made up of students from more than one school are labeled global teams. SAMs are collected from: weekly timecards, instructor observations, and software engineering tool usage logs. SAMs are then aggregated by team and time interval (see next section) into TAMs (Team Activity Measure). Outcomes are determined at the end of the semester through evaluation of student team work in two categories: software engineering process (how well the team applied best software engineering practices), and software engineering product (the quality of the finished product the team produced). Thus for each team, two outcomes are determined, process and product, respectively. Outcomes are classified into two class grades, A or F. A represents teams that are at or above expectations, F represents teams that are below expectations or need attention. For more information, please see the paper referenced above. The SE process and SE product outcomes represent ML training classes# and are to be considered separately, e.g. one should train ML for SE process separately from training for SE product. ``` path ='data/SETAP PRODUCT DATA' allFiles = glob.glob(path + "/*.csv") frame = pandas.DataFrame() list_ = [] for file_ in allFiles: df = pandas.read_csv(file_,index_col=None, header=0) list_.append(df) frame = pandas.concat(list_) data = pandas.DataFrame.from_csv("data/SETAP PRODUCT DATA/setapProductT1.csv") # full_data=True will let explore the whole dataset (T1-T11) full_data = True if (full_data): data = frame labels = data['productLetterGrade'] features = data.drop('productLetterGrade', axis=1) #Drop certain features if (full_data): features = features.drop([col for col in features.columns if 'Total' in col], axis=1) features = features.drop([col for col in features.columns if 'Count' in col], axis=1) features = features.drop([col for col in features.columns if 'Student' in col], axis=1) #features = features.drop('femaleTeamMembersPercent', axis=1) # Rename strings in data to appropriate integers, labels to booleans mapping = {'F': False, 'A': True} features_mapping = {'M': 0, 'F' : 1, 'Global': 0, 'Local': 1} features = pandas.DataFrame(features) labels = pandas.DataFrame(labels) labels = labels.applymap(lambda s: mapping.get(s) if s in mapping else s) #features.dropna(axis='columns', how='any', inplace=True) features.fillna(1, inplace=True) features = features.applymap(lambda s: features_mapping.get(s) if s in features_mapping else s) X_train, X_test, y_train, y_test = train_test_split(features, labels, random_state=1, train_size=0.4) rfc = RandomForestClassifier(n_estimators= 1000, max_features=0.25, max_depth=50, oob_score=True, n_jobs=-1) rfc.fit(X_train, y_train.values.ravel()) print ('Accuracy score: ' + str(round(rfc.score(X_test, y_test.values.ravel()),3)*100) + '%') import matplotlib.pyplot as plt n_features = len(features.columns) plt.figure(figsize=(5,n_features/5)) plt.barh(range(n_features), rfc.feature_importances_, align='center') plt.yticks(np.arange(n_features), features.columns) plt.xlabel('Full Feature Importance of ' + str(n_features) + ' features') plt.ylabel('Feature') plt.show() 
features.columns[np.argmax(rfc.feature_importances_)] print ( "Top important features:") count = 1 for string in features.columns[rfc.feature_importances_.argsort()[-6:][::-1]] : print(str(count) + '. ' + string ) count+=1 print("Full dataset test set accuracy") pandas.crosstab(y_test['productLetterGrade'], rfc.predict(X_test), rownames=['Actual'], colnames=['Predicted']) #pandas.crosstab(labels['productLetterGrade'], rfc.predict(features), rownames=['Actual'], colnames=['Predicted']) #Drop certain features if (full_data): data = pandas.DataFrame.from_csv("data/SETAP PRODUCT DATA/setapProductT1.csv") labels = data['productLetterGrade'] features = data.drop('productLetterGrade', axis=1) features = features.drop([col for col in features.columns if 'Total' in col], axis=1) features = features.drop([col for col in features.columns if 'Count' in col], axis=1) features = features.drop([col for col in features.columns if 'Student' in col], axis=1) #features = features.drop('femaleTeamMembersPercent', axis=1) # Rename strings in data to appropriate integers, labels to booleans mapping = {'F': False, 'A': True} features_mapping = {'M': 0, 'F' : 1, 'Global': 0, 'Local': 1} features = pandas.DataFrame(features) labels = pandas.DataFrame(labels) labels = labels.applymap(lambda s: mapping.get(s) if s in mapping else s) #features.dropna(axis='columns', how='any', inplace=True) features.fillna(1, inplace=True) features = features.applymap(lambda s: features_mapping.get(s) if s in features_mapping else s) print ('T1 Accuracy score: ' + str(round(rfc.score(features, labels.values.ravel()),3)*100) + '%') pandas.crosstab(labels['productLetterGrade'], rfc.predict(features), rownames=['Actual'], colnames=['Predicted']) data = pandas.DataFrame.from_csv("data/SETAP PRODUCT DATA/setapProductT2.csv") labels = data['productLetterGrade'] features = data.drop('productLetterGrade', axis=1) #Drop certain features if (full_data): features = features.drop([col for col in features.columns if 'Total' in col], axis=1) features = features.drop([col for col in features.columns if 'Count' in col], axis=1) features = features.drop([col for col in features.columns if 'Student' in col], axis=1) #features = features.drop('femaleTeamMembersPercent', axis=1) # Rename strings in data to appropriate integers, labels to booleans mapping = {'F': False, 'A': True} features_mapping = {'M': 0, 'F' : 1, 'Global': 0, 'Local': 1} features = pandas.DataFrame(features) labels = pandas.DataFrame(labels) labels = labels.applymap(lambda s: mapping.get(s) if s in mapping else s) #features.dropna(axis='columns', how='any', inplace=True) features.fillna(1, inplace=True) features = features.applymap(lambda s: features_mapping.get(s) if s in features_mapping else s) print ('T2 Accuracy score: ' + str(round(rfc.score(features, labels.values.ravel()),3)*100) + '%') pandas.crosstab(labels['productLetterGrade'], rfc.predict(features), rownames=['Actual'], colnames=['Predicted']) data = pandas.DataFrame.from_csv("data/SETAP PRODUCT DATA/setapProductT3.csv") labels = data['productLetterGrade'] features = data.drop('productLetterGrade', axis=1) #Drop certain features if (full_data): features = features.drop([col for col in features.columns if 'Total' in col], axis=1) features = features.drop([col for col in features.columns if 'Count' in col], axis=1) features = features.drop([col for col in features.columns if 'Student' in col], axis=1) #features = features.drop('femaleTeamMembersPercent', axis=1) # Rename strings in data to appropriate integers, labels to 
booleans mapping = {'F': False, 'A': True} features_mapping = {'M': 0, 'F' : 1, 'Global': 0, 'Local': 1} features = pandas.DataFrame(features) labels = pandas.DataFrame(labels) labels = labels.applymap(lambda s: mapping.get(s) if s in mapping else s) #features.dropna(axis='columns', how='any', inplace=True) features.fillna(1, inplace=True) features = features.applymap(lambda s: features_mapping.get(s) if s in features_mapping else s) print ('T3 Accuracy score: ' + str(round(rfc.score(features, labels.values.ravel()),3)*100) + '%') pandas.crosstab(labels['productLetterGrade'], rfc.predict(features), rownames=['Actual'], colnames=['Predicted']) data = pandas.DataFrame.from_csv("data/SETAP PRODUCT DATA/setapProductT6.csv") labels = data['productLetterGrade'] features = data.drop('productLetterGrade', axis=1) #Drop certain features if (full_data): features = features.drop([col for col in features.columns if 'Total' in col], axis=1) features = features.drop([col for col in features.columns if 'Count' in col], axis=1) features = features.drop([col for col in features.columns if 'Student' in col], axis=1) #features = features.drop('femaleTeamMembersPercent', axis=1) # Rename strings in data to appropriate integers, labels to booleans mapping = {'F': False, 'A': True} features_mapping = {'M': 0, 'F' : 1, 'Global': 0, 'Local': 1} features = pandas.DataFrame(features) labels = pandas.DataFrame(labels) labels = labels.applymap(lambda s: mapping.get(s) if s in mapping else s) #features.dropna(axis='columns', how='any', inplace=True) features.fillna(1, inplace=True) features = features.applymap(lambda s: features_mapping.get(s) if s in features_mapping else s) print ('T3 Accuracy score: ' + str(round(rfc.score(features, labels.values.ravel()),3)*100) + '%') pandas.crosstab(labels['productLetterGrade'], rfc.predict(features), rownames=['Actual'], colnames=['Predicted']) ```
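The four evaluation cells above repeat the same preprocessing almost verbatim for the T1, T2, T3 and T6 files. A possible refactoring is sketched below; it is my addition, follows the same steps as the cells above, reuses the already-fitted `rfc`, and swaps the deprecated `pandas.DataFrame.from_csv` for `pandas.read_csv`.

```
def evaluate_interval(csv_path, model, drop_substrings=("Total", "Count", "Student")):
    """Load one SETAP time-interval file, apply the preprocessing used above,
    and report the accuracy of an already-fitted classifier."""
    data = pandas.read_csv(csv_path, index_col=0)
    labels = data['productLetterGrade'].map(lambda s: {'F': False, 'A': True}.get(s, s))
    features = data.drop('productLetterGrade', axis=1)
    for token in drop_substrings:
        features = features.drop([c for c in features.columns if token in c], axis=1)
    features = features.fillna(1)
    features = features.applymap(lambda s: {'M': 0, 'F': 1, 'Global': 0, 'Local': 1}.get(s, s))
    acc = model.score(features, labels.values.ravel())
    print('%s accuracy: %.1f%%' % (csv_path, 100 * acc))
    return pandas.crosstab(labels, model.predict(features),
                           rownames=['Actual'], colnames=['Predicted'])

# e.g. evaluate_interval("data/SETAP PRODUCT DATA/setapProductT2.csv", rfc)
```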
github_jupyter
# Main statistics

- Expectation, variance, and the weak law of large numbers
- Special random variables

## Expectation

The expectation or expected value of a r.v. $X$ is denoted $E[X]$ and is computed as:

$E[X] = \begin{cases} \sum_i x_i P(X=x_i) & \text{if } X \text{ is discrete}\\ \int x f_X(x)\,dx & \text{if } X \text{ is continuous} \end{cases}$

Let $g$ be a real-valued function; then:

$E[g(X)] = \begin{cases} \sum_i g(x_i) P(X=x_i) & \text{if } X \text{ is discrete}\\ \int g(x) f_X(x)\,dx & \text{if } X \text{ is continuous} \end{cases}$

For the special case $g(x) = x^n$, the n-th moment of $X$ is defined as:

$E[X^n] = \begin{cases} \sum_i x_i^n P(X=x_i) & \text{if } X \text{ is discrete}\\ \int x^n f_X(x)\,dx & \text{if } X \text{ is continuous} \end{cases}$

The expectation is the first moment and is denoted $\mu$.

**Properties**

Let $a,b \in \mathbb{R}$; then:

$\begin{array}{lll} E[aX+b] & = & aE[X] + b \\ E[X + Y] & = & E[X] + E[Y] \end{array}$

## Variance and covariance

The variance measures the variation of the r.v. around the expectation or mean $\mu$, and is defined as

$Var(X) = E[(X-\mu)^2] = E[X^2] - \mu^2$

It holds that:

$Var(aX+b) = a^2 Var(X)$

The standard deviation is further defined as $\sigma = \sqrt{Var(X)}$.

The covariance measures the (linear) relationship between two r.v.'s $X$ and $Y$. If we denote $\mu_X = E[X]$ and $\mu_Y = E[Y]$, then:

$Cov(X,Y) = E[(X-\mu_X)(Y-\mu_Y)]$

The correlation is a normalized measure:

$Corr(X,Y) = \frac{Cov(X,Y)}{\sqrt{Var(X)\,Var(Y)}}$

**Properties**

$\begin{array}{lll} Cov(X,Y) & = & Cov(Y,X) \\ Cov(X,X) & = & Var(X)\\ Cov(X+Z,Y) & = & Cov(X,Y) + Cov(Z,Y)\\ Cov(\sum_i X_i,Y) & = & \sum_i Cov(X_i,Y)\\ Var(X+Y) & = & Var(X) + Var(Y) + 2Cov(X,Y)\\ Var(\sum_i X_i) & = & \sum_i Var(X_i) + \sum_i \sum_{j\neq i} Cov(X_i,X_j) \end{array}$

## Other statistics

$\begin{array}{lll} \text{Skewness} & = & \frac{E[(X-\mu)^3]}{\sigma^3} = \frac{E[X^3]-3\mu\sigma^2 - \mu^3}{\sigma^3}\\ &&\\ \text{Kurtosis} & = & \frac{E[(X-\mu)^4]}{\sigma^4} = \frac{E[X^4] - 4\mu E[X^3] + 6\mu^2\sigma^2 + 3\mu^4}{\sigma^4}\\ \end{array}$
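To make these definitions concrete, here is a small numerical check by simulation, in the spirit of the weak law of large numbers mentioned at the top. It is an add-on to these notes, and the distributions used are arbitrary examples.

```
import numpy as np

rng = np.random.default_rng(0)
n = 1_000_000

# X ~ Uniform(0, 1); Y = 2X + noise, so X and Y are positively correlated
x = rng.uniform(0, 1, n)
y = 2 * x + rng.normal(0, 0.1, n)

# Empirical moments approach the theoretical ones as n grows (weak LLN)
print("E[X]   ~", x.mean(), "(theory: 1/2)")
print("Var(X) ~", x.var(), "(theory: 1/12 ~ 0.0833)")
print("Cov    ~", np.cov(x, y)[0, 1], "(theory: 2*Var(X) ~ 0.1667)")
print("Corr   ~", np.corrcoef(x, y)[0, 1])

# Check Var(X+Y) = Var(X) + Var(Y) + 2 Cov(X,Y)
print((x + y).var(), "vs", x.var() + y.var() + 2 * np.cov(x, y)[0, 1])
```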
github_jupyter
# Linear Algebra with Python and NumPy ``` # First, we need to import the package NumPy, which is the library enabling all the fun with algebraic structures. from numpy import * ``` ## Complex Numbers A complex number is a number of the form $z = x + jy$, where $x$ and $y$ are real numbers and $j$ is the **_imaginary unit_**, satisfying $j^2 = −1$. Note that the imaginary unit, often denoted as $i$, is denoted as $j$ in Python. The set $\mathbb{C}$ of all complex numbers can be actually defined as the set of ordered pairs of real numbers $\{(x,y) \mid x,y\in\mathbb{R} \}$ that satisfies the following operations <img src="https://betterexplained.com/wp-content/uploads/complex/complex_conjugates.png" style="float:right"/> - *addition:* $(a,b)+(c,d) = (a+c,b+d)$ - *multiplication:* $(a,b)\cdot(c,d) = (ac-bd,ad+bc)$ Then, it is just a matter of notation to express a complex number as $(x, y)$ or as $x + jy$. When we have a complex number $z\in\mathbb{C}$, we can denote its real and imaginary part as $$ x = \Re(z), \quad y = \Im(z). $$ The **_complex conjugate_** of the complex number $z = x + jy$ is denoted by either $\bar{z}$ or $z^*$ and defined as $$\bar{z} = x − jy .$$ The **_absolute value_** (or modulus or magnitude) of a complex number $z = x + jy$ is $$ | z | = \sqrt{x^2+y^2} = \sqrt{z \bar{z}} .$$ ``` z = 3 + 4j # Define complex number z print('z =', z) print('Re(z) =', real(z)) # Get real part of z print('Im(z) =', imag(z)) # Get imaginary part of z print('|z| =', abs(z)) # Get absolute value of z ``` Note that to obtain $j=\sqrt{-1}$ we must write the argument of `sqrt` function as a complex number (even if has zero imaginary part), otherwise Python tries to compute sqrt on real numbers and throws an error. ``` z = sqrt(-1+0j) print('sqrt(-1) =', z) ``` ## Vectors and Matrices Using NumPy we can define vectors and matrices with both real or complex elements. Although, in contrast to Matlab, where matrix is the default type, in Python we need to define vectors and matrices as `array` or `matrix` type from NumPy package. <img src="http://www.math.cornell.edu/~mec/Winter2009/RalucaRemus/Lecture1/Images/matrix.gif"/> ``` a = array([10,20,30]) # Define a vector of size 3 using type 'array' print(a) print(a.shape) # Size/shape of vector b = matrix('10 20 30') # Define a vector of size 3 using type 'matrix' print(b) print(b.shape) # Size/shape of vector c = linspace(10,20,6) # Define vector as 6 values evenly spaced from 10 to 20 print(c) ``` Note that matrix and array elements in Python are indexed from 0, in contrast to Matlab where indexing starts from 1. ``` print(c[:]) # Get all elements print(c[0]) # The first element print(c[-1]) # The last element print(c[:3]) # The first 3 elements print(c[-3:]) # The last 3 elemnets print(c[2:4]) # 2:4 selects elements of indexes 2 and 3 ``` **_Euclidean norm_** of vector is returned by method `numpy.linalg.norm` ``` norm = linalg.norm(a) # Euclidean norm of vector a print('a =', a) print('norm(a) =', norm) x = a/linalg.norm(a) # Make normalized/unit vector from a print('x =', x) print('norm(x) =', linalg.norm(x)) ``` **_Transposition_** of vectors is not so intuitive as in Matlab, especially if a vector is defined as 1D `array` and you cannot distinguish between row and column vector. However, using the keyword `newaxis` it's possible to shape the vector into 2D array (as matrix of size $1 \times n$ or $n \times 1$), where transposition makes sense and can be obtained by attribute `.T`. 
``` x = a[:,newaxis] # Make column vector from vector a (defined as array) print(x) print(x.shape) # Now size of column vector is 3x1 print(x.T) # Make row vector by transpostion of column vector ``` If a vector was defined as 2D array of type `matrix`, transportation is not a problem. ``` x = b.T # Make column vector from vector b (defined as matrix) print(x) print(x.shape) # Now size of column vector is 3x1 print(x.T) # Make row vector by transpostion of column vector ``` **_Matrices_** can be defined as 2D arrays of type `array` or `matrix` (there is no problem with transposition with any type). ``` A = array([[11,12,13], [21,22,23], [31,32,33]]) # Define matrix of size 3x3 as 2D 'array-type' print(A) print(A.shape) B = matrix('11 12 13; 21 22 23; 31 32 33') # Define matrix of size 3x3 as 'matrix-type' print(B) print(B.shape) print(B[0,1]) # Get matrix element at row 0, column 1 print(B[0,:]) # Get 1st row of matrix (A[0] returns also 1st row) print(B[:,0]) # Get 1st column of matrix print(A[:,0]) # Note that column from 'array-type' matrix is returned as 1D array print(B[:,0]) # Column from 'matrix-type' matrix is returned as true column as expected ``` NumPy can generate some essential matrices exactly like Matlab. ``` print('3x3 Matrix full of zeros:') print(zeros([3,3])) print('\n3x3 Matrix full of ones:') print(ones([3,3])) print('\n3x3 identity matrix:') print(eye(3)) print('\n3x3 diagonal matrix:') x = array([1.,2.,3.]) print(diag(x)) print('\n3x3 random matrix:') print(random.rand(3,3)) ``` For merging matrices or vectors methods `numpy.hstack` and `numpy.vstack` can be used. ``` print(vstack([ A, ones([1,3]) ])) # Add row vector to matrix print(hstack([ A, ones([3,1]) ])) # Add column vector to matrix print(hstack([ A, eye(3) ])) # Merge two matrices horizontally ``` ## Operations with Matrices **_Matrix transposition_** is obtained by attribute `.T` ``` X = ones([2,5]) # Generate 2x5 matrix full of ones Y = X.T # Obtain transpose of matrix X print('Matrix X of size', X.shape, ':\n', X) print('\nMatrix Y=X.T of size', Y.shape, ':\n', Y) ``` **_Hermitian transpose_** (or conjugate transpose) of complex matrix $\mathbf{A}\in\mathbb{C}^{m\times n}$ is obtained by taking the transpose of $\mathbf{A}$ and then taking the complex conjugate of each element. Note that for real matrices Hermitian transpose and plain transpose does not differ. In NumPy this kind of transposition is obtained by attribute `.H` (exists only for matrix type). ``` X = matrix((3+4j)*ones([2,5])) # Generate matrix full of complex elements 3+4j Y = X.H # Obtain Hermitian transpose of matrix X print('Matrix X of size', X.shape, ':\n', X) print('\nMatrix Y=X.H of size', Y.shape, ':\n', Y) ``` **_Matrix multiplication_** must be executed by method for dot product `numpy.dot`. Operator `*` produces only element-wise multiplication in Python. ``` print('Matrix A:') print(A) print('\nMatrix B:') B = ones([3,3]) print(B) print('\nElement-wise multiplication A*B:') print(A*B) print('\nMatrix multiplication A by B:') print(dot(A,B)) print('\nMatrix multiplication B by A:') print(dot(B,A)) ``` There are also methods for essential matrix features like **_Frobenius norm_**, **_rank_** or **_determinant_**. 
```
print('Matrix A of size', A.shape, ':\n', A)

# Frobenius norm of matrix
print('\nFrobenius norm: ||A|| =', linalg.norm(A))

# Rank of matrix
print('rank(A) =', linalg.matrix_rank(A))

# Determinant of matrix
print('det(A) =', linalg.det(A))
```

In the example above, note that the matrix $\mathbf{A}$ is a singular matrix: its rank is lower than the number of its rows, and therefore its determinant is zero.

## Conclusion

As we can see from this article, Python and the NumPy package can be used to perform all the usual matrix manipulations. There are only a few annoying things one needs to keep in mind when writing Python code. For example, the operator `*` applied to matrices doesn't produce the matrix product, but only element-wise multiplication. As for vectors, many methods return them just as a 1D `array`, so we need to convert them into a 2D `array` or `matrix` type first to be able to distinguish between row and column vectors.

### References:
- [Complex numbers](https://en.wikipedia.org/wiki/Complex_number)
- [Vectors](https://en.wikipedia.org/wiki/Coordinate_vector)
- [Matrix][1]
- [Hermitian transpose](https://en.wikipedia.org/wiki/Conjugate_transpose)
- [Linear algebra](https://en.wikipedia.org/wiki/Linear_algebra)
- [Vector space](https://en.wikipedia.org/wiki/Vector_space)
- [NumPy documentation](http://docs.scipy.org/doc/numpy/)
- [NumPy for Matlab users](https://docs.scipy.org/doc/numpy-dev/user/numpy-for-matlab-users.html)
- [Matplotlib documentation](http://matplotlib.org/)

[1]:https://en.wikipedia.org/wiki/Matrix_(mathematics)
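A brief addendum to the conclusion (not in the original article): on Python 3.5+ with a recent NumPy, the dedicated matrix-multiplication operator `@` sidesteps the `*` pitfall for `array` objects, so there is no need to reach for `dot` explicitly.

```
from numpy import array

A = array([[11, 12, 13], [21, 22, 23], [31, 32, 33]])
B = array([[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]])

print(A * B)   # element-wise multiplication
print(A @ B)   # true matrix product, equivalent to dot(A, B)
```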
github_jupyter
# Two Market Makers - via Pontryagin This notebook corresponds to section 4 (**Agent based models**) of "Market Based Mechanisms for Incentivising Exchange Liquidity Provision" available [here](https://vega.xyz/papers/liquidity.pdf). It models two market makers and solves the resulting game by an iterative scheme based on the Pontryagin optimality principle. ``` import math, sys import numpy as np from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt from matplotlib import cm from os import path count = 0 from matplotlib.backends.backend_pdf import PdfPages T = 0.4; sigma0 = 3 sigma1 = 0.5 lambd = 0.1 r = 0.0 rRisk0 = 0.3 rRisk1 = 0.1 delta_a = 1e-4 fee_scaling = 0.1 # This is key; how does instantenaous trading volume react # to market making stake # and to fees. You could specify different beleifs for the two different agents. def fee_volume_response(f): f = np.maximum(f, np.zeros(np.size(f))) f = np.minimum(f, np.ones(np.size(f))) return 1.0/(f+0.01) - f def stake_volume_response(S): return 1.0 / (1+np.exp(-0.05*S+2)) - 1.0 / (1+np.exp(2)) # Check that the shape below is concave (i.e. there is a single maximum) we need # this if we want the optimization procedure to converge x_span = np.linspace(0,1, 1000) y = fee_scaling * fee_volume_response(x_span) * x_span print('Max %f' % max(y)) max_idx=np.argmax(y) plt.xlabel('fee in %') plt.ylabel('volume in %') plt.title('Fee response times fee') plt.plot(x_span,y) # Check that the shape below is concave (i.e. there is a single maximum) we need # this if we want the optimization procedure to converge. # Of course you may be lucky and things will work even in the case when it's not exactly convex... x_span = np.linspace(0,200, 200) y = stake_volume_response(x_span) plt.xlabel('stake') plt.ylabel('volume in %') plt.title('Stake response') plt.plot(x_span,y) # As things are set-up at moment the agents only differ in their belief about # the maximum trading volume they'd expect to see def trading_volume0(f,S): N_max = 10000 return N_max * fee_volume_response(f) * stake_volume_response(S) def trading_volume1(f,S): N_max = 50000 return N_max * fee_volume_response(f) * stake_volume_response(S) def running_gain0(t,f,S0,S1,a0): frac = S0/(S0+S1) stake = S0+S1 return np.exp(-r*t) * (frac * fee_scaling * f * trading_volume0(f,stake) - max(lambd * sigma0 * S0,0)) - max(np.exp(rRisk0*t)*S0, 0) \ - delta_a * a0*a0 def running_gain1(t,f,S0,S1,a1): frac = S1/(S0+S1) stake = S0+S1 return np.exp(-r*t) * (frac * fee_scaling * f * trading_volume1(f,stake) - max(lambd * sigma1 * S1,0)) - max(np.exp(rRisk1*t)*S1, 0) \ - delta_a * a1*a1 def running_gain_x_0(t,x,S_1, a0): f = x[0] S_0 = x[1] return running_gain0(t,f,S_0,S_1, a0) def running_gain_x_1(t,x,S_0, a1): f = x[0] S_1 = x[1] return running_gain1(t,f,S_0,S_1, a1) # Below we define the gradients (using finite difference) # of the running gain specified above - this is just a technicality # used in the subsequent optimization. 
def grad_x_of_running_gain_0(t,x,S1,a): delta = 1e-8 grad = np.zeros(2) #print(x) x_plus = x + np.array([delta, 0]) x_minus = x - np.array([delta, 0]) rg_plus = running_gain_x_0(t,x_plus,S1,a) rg_minus = running_gain_x_0(t,x_minus,S1,a) #print(x_plus) grad[0] = (rg_plus - rg_minus)/(2*delta) x_plus = x + np.array([0, delta]) x_minus = x - np.array([0, delta]) rg_plus = running_gain_x_0(t,x_plus,S1,a) rg_minus = running_gain_x_0(t,x_minus,S1,a) grad[1] = (rg_plus - rg_minus)/(2*delta) return grad def grad_x_of_running_gain_1(t,x,S0,a): delta = 1e-8 grad = np.zeros(2) x_plus = x + np.array([delta, 0]) x_minus = x - np.array([delta, 0]) rg_plus = running_gain_x_1(t,x_plus,S0,a) rg_minus = running_gain_x_1(t,x_minus,S0,a) grad[0] = (rg_plus - rg_minus)/(2*delta) x_plus = x + np.array([0, delta]) x_minus = x - np.array([0, delta]) rg_plus = running_gain_x_1(t,x_plus,S0,a) rg_minus = running_gain_x_1(t,x_minus,S0,a) grad[1] = (rg_plus - rg_minus)/(2*delta) return grad # Initialization L_S = 150; L_f = 1; N_T = 200; delta_t = T / (N_T-1); N_S = 45; N_f = 45; t_span = np.linspace(0, T, N_T) f_span = np.linspace(0, L_f, N_f) S_span = np.linspace(0, L_S, N_S) def grid_idx_from(S,S_span): min_S = S_span[0] N_S = np.size(S_span) max_S = S_span[N_S-1] delta_S = (max_S-min_S)/(N_S-1) return max(min(int(round(S/delta_S)), N_S-1),0) F_vals = np.zeros([np.size(f_span), np.size(S_span)]) f_times_V_vals = np.zeros([np.size(f_span), np.size(S_span)]) grad_F_vals = np.zeros([np.size(f_span), np.size(S_span), 2]) for f_idx in range(0, np.size(f_span)): for S_idx in range(0, np.size(S_span)): f = f_span[f_idx] S = S_span[S_idx] F_vals[f_idx,S_idx] = running_gain0(T, f, S, 10, 0) f_times_V_vals[f_idx,S_idx] = f*trading_volume0(f,S) grad_F_vals[f_idx,S_idx,:] = grad_x_of_running_gain_0(T, np.array([f, S]), 10, 0) max_idx = np.unravel_index(np.argmax(F_vals, axis=None),F_vals.shape) print(f_span[max_idx[0]]) print(S_span[max_idx[1]]) plotGridX, plotGridY = np.meshgrid(S_span, f_span) fig = plt.figure() #ax1 = fig.add_subplot(111,projection='3d') ax1 = fig.gca(projection='3d') surf = ax1.plot_surface(plotGridX, plotGridY, f_times_V_vals[:,:], cmap=cm.autumn, antialiased=True) ax1.set_xlabel('stake') ax1.set_ylabel('fee') ax1.set_zlabel('V') ax1.set_zlim(0, 40000) ax1.view_init(30, 20) ax1.set_title('Agent 1') plt.savefig('response1.pdf') gamma_f = -0.02 gamma_S = 5 m = 1 def drift_0(a0,a1): b = np.zeros(2) b[0] = gamma_f*(a0+a1) b[1] = gamma_S*a0 return b def drift_1(a0,a1): b = np.zeros(2) b[0] = gamma_f*(a0+a1) b[1] = gamma_S*a1 return b def grad_a0_H0(y,a0,a1): val = gamma_f*y[0] + gamma_S*y[1] - 2*delta_a*a0 return val def grad_a1_H1(y,a0,a1): val = gamma_f*y[0] + gamma_S*y[1] - 2*delta_a*a1 return val # Fix initial fee & and stake of two players fee_init = 0.5 # has to be between 0 and 1 player0_stake = 250 player1_stake = 10 # Learning params: # higher value means faster convergence but less stability i.e.: # if you see stupid output (explosion, negative fees etc.) set this lower. rho = 0.05 # learning takes a long time and if it says "failed at the end it might just means that it's still updating a bit." 
max_iter = 6000 #stopping criteria: once the updates are smaller than this in l-infinity then stop max_error = 0.1 # fees are the 0th component, stake is the 1st component # first player, index 0 actions0 = np.zeros([1,N_T+1]) x_vals0 = np.zeros([2,N_T+1]) x_vals0[:,0] = np.array([fee_init, player0_stake]) y_vals0 = np.zeros([2,N_T+1]) # second player, index 1 actions1 = np.zeros([1,N_T+1]) x_vals1 = np.zeros([2,N_T+1]) x_vals1[:,0] = np.array([fee_init, player1_stake]) y_vals1 = np.zeros([2,N_T+1]) def run_iterative_system(max_iter,max_error): actions_old0 = np.zeros([1,N_T+1]) actions_old1 = np.zeros([1,N_T+1]) diff = 0; failed_to_converge=True for iter_idx in range(0,max_iter): # Run x0, x1 forwards for i in range(0,N_T): x_vals0[:,i+1] = x_vals0[:,i] + drift_0(actions0[0,i], actions1[0,i]) * delta_t # second guy only updates the stake # but the fee evolution is copied from first x_vals1[0,i+1] = x_vals0[0,i+1] x_vals1[1,i+1] = x_vals1[1,i] + drift_1(actions0[0,i], actions1[0,i])[1] * delta_t # Run y0, y1 backwards y_vals0[:,N_T] = np.zeros(2) y_vals1[:,N_T] = np.zeros(2) for i in reversed(range(0,N_T)): S0 = x_vals0[1,i] S1 = x_vals1[1,i] grad_x_F_0 = grad_x_of_running_gain_0(t_span[i], x_vals0[:,i], S1, actions0[0,i]) grad_x_F_1 = grad_x_of_running_gain_1(t_span[i], x_vals1[:,i], S0, actions1[0,i]) y_vals0[:,i] = y_vals0[:,i+1] + grad_x_F_0 * delta_t y_vals1[:,i] = y_vals1[:,i+1] + grad_x_F_1 * delta_t for i in range(0,N_T): # Do one gradient ascent step (we are maximizing) actions0[0,i] = actions0[0,i] + rho*grad_a0_H0(y_vals0[:,i],actions0[0,i],actions1[0,i]) actions1[0,i] = actions1[0,i] + rho*grad_a1_H1(y_vals1[:,i],actions0[0,i],actions1[0,i]) diff0 = np.max(np.abs(actions0 - actions_old0)) diff1 = np.max(np.abs(actions1 - actions_old1)) if (diff0 < max_error) and (diff1 < max_error) : print('Converged; iteration %d, diff0 is %f, diff1 is %f' % (iter_idx, diff0, diff1)) failed_to_converge = False break actions_old0 = np.copy(actions0) actions_old1 = np.copy(actions1) if failed_to_converge: print('Failed after %d iteration, diff0 is %f, diff1 is %f' % (max_iter, diff0,diff1)) %timeit -n1 -r1 run_iterative_system(max_iter, max_error) plt.plot(t_span, 1000 * fee_scaling * x_vals0[0,0:N_T].T,label='f0 in 10 x %') plt.plot(t_span, 1000 * fee_scaling * x_vals1[0,0:N_T].T,color='green',label='f1 in 10 x %') plt.xlabel('time') plt.plot(t_span, x_vals0[1,0:N_T].T,color='red',label='stake 0') plt.plot(t_span, x_vals1[1,0:N_T].T,color='pink',label='stake 1') plt.title('State evolution - fees and stake') plt.xlabel('time') plt.ylabel('level') plt.legend() plt.savefig('state.pdf') fig = plt.figure() plt.plot(t_span, actions0[0,0:N_T].T,label='a - 0') plt.plot(t_span, actions1[0,0:N_T].T, color='green',label='a - 1') plt.title('Actions evolution') plt.xlabel('time') plt.ylabel('actions fees') plt.xlabel('time') plt.ylabel('level') plt.legend() plt.savefig('actions.pdf') print('Minimum fee %.2f%%. Final fee %.2f%%.' % (fee_scaling * 100*min(x_vals1[0,0:N_T]),fee_scaling * 100*x_vals1[0,N_T-1])) print('Minimum stake %.0f. Maximum stake %.0f. Final stake %.0f.' % (min(x_vals0[1,0:N_T]+x_vals1[1,0:N_T]),max(x_vals0[1,0:N_T]+x_vals1[1,0:N_T]),x_vals0[1,N_T-1]+x_vals1[1,N_T-1])) # Adjoint process plot: this is a 'dummy' process used in the optimization # and you can ignore it if all goes well fig = plt.figure() plt.plot(t_span, 0.1*y_vals0[0,0:N_T].T, label='adj. fees 0') plt.plot(t_span, 0.1*y_vals1[0,0:N_T].T, color='green', label='adj. 
fees 1') plt.xlabel('time') plt.plot(t_span, y_vals0[1,0:N_T].T, color = 'red', label='adj. stake 0') plt.plot(t_span, y_vals1[1,0:N_T].T, color = 'pink', label='adj. stake 1') plt.title('Adjoint evolution - fees and stake') plt.xlabel('time') plt.legend() ```
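One quantity the loop above never reports is how well each player actually does under the converged strategies. The helper below is my addition: a rough check that integrates each player's running gain along the computed trajectory with a simple left Riemann sum (this notebook's objective has no separate terminal term, so discretisation error aside this is the whole payoff).

```
def total_gain(run_gain_fn, x_vals, other_stake, actions):
    """Approximate a player's objective along the converged trajectory."""
    total = 0.0
    for i in range(N_T):
        total += float(run_gain_fn(t_span[i], x_vals[:, i], other_stake[i], actions[0, i])) * delta_t
    return total

print("Player 0 realized gain:", total_gain(running_gain_x_0, x_vals0, x_vals1[1, :], actions0))
print("Player 1 realized gain:", total_gain(running_gain_x_1, x_vals1, x_vals0[1, :], actions1))
```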
github_jupyter
``` import sys import os sys.path.insert(0, os.path.abspath('../src/')) ``` # Plotting ``` from pathlib import Path import SimplePreprocessor as sp DATASETPATH = Path("../dataset/") pr = sp.SimplePreprocessor(deltas=True, discretize=False, flevel="MAGIK") netdata = pr.load_path(DATASETPATH) netdata["_date"] = netdata.index.get_level_values("_time").strftime('%a %d %b %y') import numpy as np import ipywidgets as widgets from IPython.display import display, Markdown import matplotlib.pyplot as plt import matplotlib.dates as mdates from ipywidgets import HBox, VBox, interactive, Layout devices_idxs = netdata.index.droplevel(2).unique() devices = [f"{host} ({cat})" for cat, host in devices_idxs] devices.sort() available_channels = [c for c in netdata.columns if (("time" not in c) and (c[0] != "_"))] available_channels.sort() available_days = np.unique(netdata["_date"]) # ----- ----- WIDGETS ----- ----- # # ----- ----- ------- ----- ----- # device_w_list = widgets.Dropdown(options=devices) days_w_list = widgets.Dropdown(options=available_days) selectedc_w_list = widgets.SelectMultiple(options=available_channels, description='Channel', layout=Layout(width='400px')) timerange_slider = widgets.FloatSlider(min=.005, max=1., step=.005) smoothing_slider = widgets.FloatSlider(min=0, max=79, step=4, description="Smoothing (aggregate x minutes)") offset_slider = widgets.FloatSlider(min=.0, max=1., step=.01) ts_selector = HBox([device_w_list, days_w_list]) col_selector = HBox([selectedc_w_list]) ts_shifting = HBox([timerange_slider, offset_slider]) wlist = VBox([ts_selector, col_selector, ts_shifting, smoothing_slider]) # ----- ----- PLOTTER ----- ----- # # ----- ----- ------- ----- ----- # def mprint(s): display(Markdown(s)) def randcolors(n): hexl = list('0123456789ABCDEF') hexc = np.random.choice(hexl, size=(n, 6)) return ['#' + ''.join(x) for x in hexc] def remove_empty(data): empty_cols = [ c for c in data.columns if (data[c]==0).all() ] for c in empty_cols: mprint(f"**<span style='color: red'>Empty series:</span> {c}**") return data.drop(empty_cols, axis=1) def datetime2xaxis(dtseries, smoothing): if len(dtseries) <= 50: return "%a - %H:%M:%S" elif len(dtseries) <= 100: return "%a - %H:%M" else: return "%a - %H" def describe_mtimeseries(plotname, data, smoothing=1): # Data description ..... # mprint(f"### {plotname}") start = min(data.index) end = max(data.index) mprint(f"**Time range**: {start} **/** {end}") mprint(f"**Total data range:** {end-start}") mprint(f"**Samples shown**: {len(data)}") mprint(f"**Smoothing**: {int(smoothing / 4)} minutes") if len(data) <= 50: xaxis_format = "%a - %H:%M:%S" elif len(data) <= 100: xaxis_format = "%a - %H:%M" else: xaxis_format = "%a - %H" # Plotting clean data ..... # empty_cols = [] legend = [] data = remove_empty(data) # Smoothing ..... 
# channels = data.drop(["_isanomaly"], axis=1).columns data[channels] = data[channels].rolling(smoothing, center=True).sum() / smoothing data = data.dropna() anomaly_mask = (data["_isanomaly"] != "none") for idx, c in enumerate(channels): legend.append(c) fig, ax = plt.subplots(figsize=(12, 6)) ax.format_xdata = mdates.DateFormatter(xaxis_format) ax.plot(data.index, data[c]) fig.autofmt_xdate() if anomaly_mask.any(): attack_data = data[anomaly_mask] for anomalyname, anomalydata in attack_data.groupby("_isanomaly"): legend.append(anomalyname) anomalydata = anomalydata.drop("_isanomaly", axis=1) ax.plot(anomalydata.index, anomalydata.values) fig.autofmt_xdate() fig.suptitle(f"{c}", fontweight="bold") plt.legend(legend) plt.show() # ----- ----- INTERACTOR ----- ----- # # ----- ----- ---------- ----- ----- # def whandler(device, day, channel, timerange, offset, smoothing): split = device.split(" ") host = split[0].strip() category = " ".join(split[1:]).replace("(", "").replace(")", "").strip() data = netdata[netdata["_date"]==day] chs = set(channel) chs.add("_isanomaly") chs = list(chs) data = data.loc[category, host][chs] # Filtering time range full_length = len(data) start_idx = int(full_length * offset) end_idx = min(start_idx + int(full_length * timerange), full_length) data = data.iloc[start_idx:end_idx] describe_mtimeseries(device, data, int(smoothing+1)) %matplotlib inline output = widgets.interactive(whandler, device=device_w_list, day=days_w_list, channel=selectedc_w_list, timerange=timerange_slider, offset=offset_slider, smoothing=smoothing_slider).children[-1] display(wlist) display(output) ```
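The widgets above ultimately just call `describe_mtimeseries` on a filtered slice, so the same plots can also be produced non-interactively, which is handy for batch reports. The sketch below is my addition; rather than hard-coding a device or channel, it picks the first entries of the index lists built earlier, so substitute whichever device, day and channel you actually want.

```
# Non-interactive usage of describe_mtimeseries
category, host = devices_idxs[0]        # first (category, host) pair in the capture
day = available_days[0]                 # first recorded day
channel = available_channels[0]         # first available channel

subset = netdata[netdata["_date"] == day].loc[category, host][[channel, "_isanomaly"]]
describe_mtimeseries(f"{host} ({category})", subset, smoothing=4)
```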
github_jupyter
<a href="https://colab.research.google.com/github/cedeerwe/brutalna-akademia/blob/master/notebooks/zaverecny_test.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Inštrukcie Test pozostáva zo 7 príkladov, dokopy za 50 bodov. Na test máš 3 hodiny času, ktoré si musíš odsledovať sám/sama. Časovač si spusti vtedy, keď si začneš čítať zadanie prvej úlohy. Každá úloha má v názve uvedený počet bodov, ktoré môžeš získať za kompletné riešenie. Čiastočné riešenia budú tiež bodované. Ak si to úloha vyžaduje, k riešeniu patrí aj vysvetlenie, prečo je vaše riešenie riešením. Úlohy riešte priamo v kópii tohto colabu. Po dokončení nám svoj colab pošlite mailom. O teste prosím žiadným spôsobom nekomunikovať s ostatnými, kým k tomu nebudete vyzvaní (všetci ho odovzdajú). Držíme palce! # Úlohy ## Šípky (6 bodov) ### Zadanie Idete sa zúčastniť súťaže v hádzaní šípok ([referencia](https://en.wikipedia.org/wiki/Darts#/media/File:Dartboard_diagram.svg)). Zlepšiť sa už síce nestihnete, ale môžte sa aspoň zamyslieť nad svojou stratégiou, prípadne si hodiť pár skúšobných hodov. Kam by ste mali mieriť so svojími schopnosťami, aby ste maximalizovali svoj bodový zisk? ### Riešenie ## Poker (5 bodov) ### Zadanie Prihlásili ste sa do súťaže v hraní matematického pokru. Pravidlá sú nasledovné: 1. hru hrajú dvaja hráči, 1. obaja hráči na začiatku do hry vložia 1€ 1. každý hráč si vytiahne rovnomerne náhodné číslo od 0 po 1, predstavujúce silu jeho kariet, 1. náhodne sa určí začínajúci hráč, 1. začínajúci hráč môže: a) *fold* a prehrať, b) *raise* a zvýšiť hru o 1€, c) *check* a nechať ísť druhého hráča, 1. druhý hráč môže: a) *fold* a prehrať, b) *call* a dorovnať prvého hráča, ak zvýšil, c) *check* a pokračovať v hre, ak prvý hráč nezvýšil, 1. ak žiadny z hráčov nezložil, porovnajú sa čísla a víťaz si berie všetky stávky Aká by mala byť vaša optimálna stratégia v tejto hre? Ako by sa zmenila vaša stratégia, keby prvý hráč mohol zvýšiť o ľubovoľnú sumu, nie iba 1€? ### Riešenie ## Random playlist (10 bodov) ### Zadanie Vlastníte službu na streamovanie hudby a máte veľa zákazníkov, ktorí majú veľa obľubených pesničiek. Vyžiadali si od vás funkcionalitu, aby si mohli púšťať svoje pesničky ako "random shuffle", teda v náhodnom poradí. Každá pesnička má vo vašom katalógu niekoľko vlastností, konkrétne: - interpret - reprezentovaný ako "a", "b", "c", ... - žáner - reprezentovaný ako "A", "B", "C", ... - číslo pesničky - reprezentované ako 1, 2, 3, ... Toto celé je reprezentované ako trojica: ``` priklad = ("a", "F", 9) ``` Dostali ste zoznam 100 obľúbených pesničiek istého uživateľa. Vygenerujte z nej postupnosť 10,000 pesničiek vyskladaných z týchto 100 pesničiek v takom poradí, ako by ste mu ich pustili za sebou, keby mal svoj "random shuffle" pustený fakt dlho. Ohodnotení budete na základe spokojnosti zákazníka po vypočutí všetkých 10,000 pesničiek. Zákazník očakáva od "random shuffle", že keď si ho pustí, tak pesničky budú chodiť rozumne náhodne a nebude počúvať za sebou príliš veľa podobných. 
```
zoznam_pesniciek = [
    ('f', 'D', 0), ('j', 'C', 1), ('h', 'B', 2), ('e', 'D', 3), ('c', 'A', 4),
    ('a', 'C', 5), ('j', 'B', 6), ('i', 'D', 7), ('a', 'C', 8), ('d', 'B', 9),
    ('i', 'C', 10), ('i', 'D', 11), ('g', 'D', 12), ('f', 'B', 13), ('b', 'C', 14),
    ('b', 'D', 15), ('g', 'A', 16), ('c', 'A', 17), ('j', 'C', 18), ('h', 'A', 19),
    ('f', 'B', 20), ('e', 'C', 21), ('c', 'E', 22), ('i', 'B', 23), ('b', 'A', 24),
    ('g', 'D', 25), ('b', 'D', 26), ('b', 'A', 27), ('i', 'C', 28), ('g', 'E', 29),
    ('c', 'C', 30), ('a', 'D', 31), ('g', 'B', 32), ('d', 'B', 33), ('g', 'B', 34),
    ('f', 'A', 35), ('g', 'C', 36), ('a', 'B', 37), ('f', 'D', 38), ('i', 'A', 39),
    ('g', 'C', 40), ('d', 'D', 41), ('d', 'A', 42), ('e', 'A', 43), ('g', 'E', 44),
    ('d', 'D', 45), ('b', 'A', 46), ('e', 'E', 47), ('f', 'B', 48), ('i', 'A', 49),
    ('e', 'D', 50), ('c', 'A', 51), ('i', 'E', 52), ('j', 'E', 53), ('d', 'A', 54),
    ('d', 'C', 55), ('e', 'C', 56), ('a', 'C', 57), ('h', 'C', 58), ('i', 'E', 59),
    ('h', 'B', 60), ('e', 'C', 61), ('a', 'A', 62), ('f', 'A', 63), ('d', 'A', 64),
    ('f', 'D', 65), ('d', 'A', 66), ('a', 'E', 67), ('e', 'E', 68), ('d', 'E', 69),
    ('b', 'B', 70), ('i', 'A', 71), ('j', 'D', 72), ('h', 'B', 73), ('c', 'E', 74),
    ('i', 'D', 75), ('j', 'B', 76), ('e', 'C', 77), ('e', 'B', 78), ('g', 'A', 79),
    ('d', 'E', 80), ('i', 'E', 81), ('b', 'A', 82), ('d', 'E', 83), ('b', 'C', 84),
    ('c', 'B', 85), ('j', 'D', 86), ('a', 'E', 87), ('h', 'E', 88), ('i', 'C', 89),
    ('c', 'A', 90), ('i', 'C', 91), ('e', 'D', 92), ('a', 'E', 93), ('g', 'A', 94),
    ('b', 'B', 95), ('h', 'D', 96), ('a', 'A', 97), ('d', 'E', 98), ('i', 'B', 99)
]
```

### Solution

## Tetrahedron (6 points)

### Assignment

You are going to take part in a real tournament in designing playing tetrahedra, in which all participants who hand in a solution to this problem will take part. The number of points for this problem will depend on your actual placement.

Playing tetrahedra differ from playing dice in that they have only 4 faces and the dots across the faces add up to only 6 in total. Moreover, these dots can be distributed arbitrarily across the faces.

Playing tetrahedra are compared by computing who has the higher chance of rolling the higher number - that player wins and gets 2 points. If the chances are equal, the match is a draw worth 1 point for each player.

As an example, if we compared the tetrahedron [6, 0, 0, 0] and the tetrahedron [3, 2, 1, 0], the first tetrahedron wins with probability $\tfrac{1}{4}$, the second tetrahedron wins with probability $\tfrac{9}{16}$, and in the remaining $\tfrac{3}{16}$ it is a tie. The second player would therefore get 2 points and the first 0 points.

The tournament has two different disciplines:

1. Design a single tetrahedron that will enter the tournament and be compared against the others. *Example solution: [3, 2, 1, 0].*

2. Design a probability distribution over all tetrahedra so that, on average, you get as many points as possible when compared against the probability distributions of the other players. *Example solution: 50% [3,2,1,0], 30% [3,3,0,0], 20% [6, 0, 0, 0].*

### Solution

## Internet (5 points)

### Assignment

You have received the question "How does the internet work?" from a certain person. Your task is to explain it to this person in one sentence, plus one link to a page from which they can learn more if they want to.

The person in question is one of the following options:

1. a first-grader at school
1. your peer from the academy
1. a university lecturer in computer science
1. Bill Gates
1. a grandmother on the street

Answer this problem for **each** of the options listed above.

### Solution

## Weather forecaster (9 points)

### Assignment

In this problem your goal is to predict certain values for a specific day - the 1st of May.
For each of these values we ask for:

- a point estimate,
- an 80% confidence interval.

The values we are interested in:

- the number of coronavirus tests performed in Slovakia
- the number of Facebook posts by prime minister Igor Matovič https://www.facebook.com/igor.matovic.7
- the maximum daily temperature in the village of Bukovina according to [SHMÚ](http://www.shmu.sk/sk/?page=1)
- the minimum price per barrel of oil according to https://markets.businessinsider.com/commodities/oil-price?type=wti
- the number of visits to the page https://en.wikipedia.org/wiki/Education
- the number of 250 g packs of butter in the freezer in the household of your lecturer - Dominik Csiba - counting only those with 82% fat content

### Solution

## But really? (9 points)

### Assignment

Answer the following questions:

1. at most how many times per day could we celebrate the new year?
1. which island did Christopher Columbus name *holy glory*?
1. what is the fastest a person has managed to push an orange one kilometre with their nose?
1. what is the longest bone in the body of the author of the book *Life without limits*?
1. which kitchen project on Kickstarter exceeded its goal more than 5000-fold?

### Solution
github_jupyter
# Data types & Structures ### A great advatage of `Python` is the type of data it can handle & combine Python has been widely used to handle internet related operations, which means lots and lots of text and numbers. combined! *** ## Let's start with the basic types! ### Like other programing languages, `Python` data types include integers, floats, complex, and strings & boolean <div class="alert alert-block alert-info"> <b>Try it out!</b> <br><br>In the next cell, assign a float value to <b>x</b> and execute the cell </div> ``` lat = 20 print(lat) ``` <div class="alert alert-block alert-info"> <b>Try it out!</b> <br><br>Assign an integer value to <b>y</b> and execute the cell </div> ``` y = print(y, lat*y) ``` ### Complex values are identified by a `j` at the end <div class="alert alert-block alert-info"> <b>Try it out!</b> <br><br>Assign a complex value to <b>z</b> and execute the cell </div> ``` z = print(z, type(z)) ``` ### Variables can be reassigned to other types anytime <div class="alert alert-block alert-info"> <b>Try it out!</b> <br><br>Execute the next cell </div> ``` lat = 'Latitude' print(lat) step = True print(step) ``` <div class="alert alert-block alert-info"> <b>Try it out!</b> <br><br>Define your own string and boolean variables and print their type in the next cell </div> ## One of the best types: datetime! <div class="alert alert-block alert-info"> <b>Try it out!</b> <br><br>- Execute the code in the next cell to define two datetime variables: <b>today1</b> & <b>today2</b> </div> ``` from datetime import date # call date function inside the datetime package today1 = date.today() from datetime import datetime # call datetime function inside the datetime package today2 = datetime.now() ``` <div class="alert alert-block alert-info"> <b>Try it out!</b> <br><br> - Now print both variables <br> - Try printing <b>today1.month</b> <br> - Try printing the following <b>today2.strftime('%c')</b> <br> - Now print the type of one of your date variables </div> <br>Note the use of <b>.</b> after a variable. This refers to a method of the variable or, as Python refers to it, the object. 
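If you want to go one step beyond the exercise above, the next cell is an add-on to these notes: it reuses the `today1`/`today2` variables to show date arithmetic with `timedelta` and a couple of common `strftime` patterns.

```
from datetime import timedelta

# Date arithmetic: subtracting two dates gives a timedelta, adding one shifts the date
one_week = timedelta(days=7)
print(today1 + one_week)                        # the date one week after today1
print(today2 - datetime(today2.year, 1, 1))     # time elapsed since January 1st

# A couple of common strftime patterns
print(today2.strftime('%Y-%m-%d'))              # ISO-style date
print(today2.strftime('%A, %d %B %Y, %H:%M'))   # long, human-readable form
```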
We will use other functions of the datetime package later, and you can find more details about the attributes of the datetime object (variable) here: https://www.guru99.com/date-time-and-datetime-classes-in-python.html

***
## Python has some basic data collections; we will talk about three of them:

List - ordered, changeable, allows duplicates
Tuple - ordered, unchangeable, allows duplicates
Dictionary - unordered, changeable, no duplicates allowed

***
## Lists: ordered, changeable, allows duplicates

<div class="alert alert-block alert-info">
<b>Try it out!</b>
<br><br>- Execute the code below and print the list
<br> - Print the type of <b>mylist</b>
</div>

```
mylist=['temperature', 'wind', 'salinity'] # note the use of [ ]
```

### To access an element of a list we use indices, which start at `0`

<div class="alert alert-block alert-info">
<b>Try it out!</b>
<br><br>- Try printing: <b>mylist[0]</b>
<br> - Now try reassigning the second element to <b>'current velocity'</b>
</div>

### To add an element to the list, use the method append

<div class="alert alert-block alert-info">
<b>Try it out!</b>
<br><br>- Try <b>mylist.append('wind speed')</b>
<br> - Then execute the next cell to print the entire list with a for loop
</div>

```
for myvar in mylist:
    print(myvar+" has been recorded")
```

### Copying a list (or another object) needs to be done explicitly, otherwise you just create a new name for the same variable

<div class="alert alert-block alert-info">
<b>Try it out!</b>
<br><br>- Try these two lines of code:
<br> <b> yourlist1 = mylist.copy()</b>
<br> <b> yourlist2 = mylist</b>
<br> - Then modify <b>yourlist1</b> and print it along with <b>mylist</b>
<br> - Now modify <b>yourlist2</b> and print it along with <b>mylist</b>
</div>

***
## Tuples: ordered, unchangeable, allows duplicates

<div class="alert alert-block alert-info">
<b>Try it out!</b>
<br><br>- Execute the code below and print the tuple
<br> - Print the type of <b>mytuple</b>
<br> - Print one element of <b>mytuple</b>
</div>

```
mytuple = ('latitude', 'longitude', 'time') # note the use of ( )
```

<div class="alert alert-block alert-info">
<b>Try it out!</b>
<br><br> - Now try reassigning an element of <b>mytuple</b>
</div>

***
## Dictionaries: unordered, changeable, no duplicates allowed

### Indexed pairs of keys and values

<div class="alert alert-block alert-info">
<b>Try it out!</b>
<br><br>- Execute the code below, and print the dictionary
<br> - Add a new element to <b>mydict</b> with <b>mydict['units']='C'</b>
<br> - Print one element of <b>mydict</b>. <i>Hint: the key is the key</i>
</div>

```
mydict = {'instrument': 'temperature sensor', 'measurement':'SST','depth': 5}
```

Certain libraries have their own specific data structures - arrays, data frames, and datasets, for example - and we will come back to these when we talk about each library.

***
***
# A few words about `Objects`, `Attributes` & `Methods`

## `Python` is an object-oriented programming language. This means almost everything is an object, i.e. an instance of a class. Variables are objects.
And therefore they have `attributes` & `methods`

### `Properties` or `Attributes` are accessed with `.attribute` after the object

### `Methods` are functions, & are accessed with `.method(arguments)` after the object

We're not going to teach you how to create classes with properties or methods, but how to access them, because we will use them extensively

<div class="alert alert-block alert-info">
<b>Try it out!</b>
<br><br>Execute the code in the next cell to access the attributes and one method of the class <b>date</b>
</div>

```
today = date.today()
print(today)

## Date object attributes
print(today.year, today.month, today.day)

## Date object method 'ctime' - does not need arguments
print(today.ctime())
```
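To make the attribute/method distinction concrete, here is one more optional cell in the same spirit. It only uses the standard-library `datetime` module already introduced above; the variable name `sample_time` is just for this example.

```
from datetime import datetime

sample_time = datetime.now()

# Attributes hold data and are accessed without parentheses
print(sample_time.year, sample_time.month, sample_time.day)

# Methods are functions attached to the object and need parentheses (and sometimes arguments)
print(sample_time.weekday())              # no arguments: 0 = Monday, 6 = Sunday
print(sample_time.strftime('%Y-%m-%d'))   # one argument: a format string
```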
github_jupyter
# Masakhane - Machine Translation for African Languages (Using JoeyNMT) ## Note before beginning: ### - The idea is that you should be able to make minimal changes to this in order to get SOME result for your own translation corpus. ### - The tl;dr: Go to the **"TODO"** comments which will tell you what to update to get up and running ### - If you actually want to have a clue what you're doing, read the text and peek at the links ### - With 100 epochs, it should take around 7 hours to run in Google Colab ### - Once you've gotten a result for your language, please attach and email your notebook that generated it to [email protected] ### - If you care enough and get a chance, doing a brief background on your language would be amazing. See examples in [(Martinus, 2019)](https://arxiv.org/abs/1906.05685) ## Retrieve your data & make a parallel corpus If you are wanting to use the JW300 data referenced on the Masakhane website or in our GitHub repo, you can use `opus-tools` to convert the data into a convenient format. `opus_read` from that package provides a convenient tool for reading the native aligned XML files and to convert them to TMX format. The tool can also be used to fetch relevant files from OPUS on the fly and to filter the data as necessary. [Read the documentation](https://pypi.org/project/opustools-pkg/) for more details. Once you have your corpus files in TMX format (an xml structure which will include the sentences in your target language and your source language in a single file), we recommend reading them into a pandas dataframe. Thankfully, Jade wrote a silly `tmx2dataframe` package which converts your tmx file to a pandas dataframe. ``` from google.colab import drive drive.mount('/content/drive') # TODO: Set your source and target languages. Keep in mind, these traditionally use language codes as found here: # These will also become the suffix's of all vocab and corpus files used throughout import os source_language = "en" target_language = "nya" lc = False # If True, lowercase the data. seed = 42 # Random seed for shuffling. tag = "baseline" # Give a unique name to your folder - this is to ensure you don't rewrite any models you've already submitted os.environ["src"] = source_language # Sets them in bash as well, since we often use bash scripts os.environ["tgt"] = target_language os.environ["tag"] = tag # This will save it to a folder in our gdrive instead! !mkdir -p "/content/drive/My Drive/masakhane/$src-$tgt-$tag" g_drive_path = "/content/drive/My Drive/masakhane/%s-%s-%s" % (source_language, target_language, tag) os.environ["gdrive_path"] = g_drive_path models_path = '%s/models/%s%s_transformer'% (g_drive_path, source_language, target_language) # model temporary directory for training model_temp_dir = "/content/drive/My Drive/masakhane/model-temp" # model permanent storage on the drive !mkdir -p "$gdrive_path/models/${src}${tgt}_transformer/" !echo $gdrive_path #TODO: Skip for retrain # Install opus-tools ! pip install opustools-pkg #TODO: Skip for retrain # Downloading our corpus ! opus_read -d JW300 -s $src -t $tgt -wm moses -w jw300.$src jw300.$tgt -q # extract the corpus file ! gunzip JW300_latest_xml_$src-$tgt.xml.gz # extract the corpus file ! gunzip JW300_latest_xml_$tgt-$src.xml.gz #TODO: Skip for retrain # Download the global test set. ! wget https://raw.githubusercontent.com/juliakreutzer/masakhane/master/jw300_utils/test/test.en-any.en # And the specific test set for this language pair. 
os.environ["trg"] = target_language os.environ["src"] = source_language ! wget https://raw.githubusercontent.com/juliakreutzer/masakhane/master/jw300_utils/test/test.en-$trg.en ! mv test.en-$trg.en test.en ! wget https://raw.githubusercontent.com/juliakreutzer/masakhane/master/jw300_utils/test/test.en-$trg.$trg ! mv test.en-$trg.$trg test.$trg #TODO: Skip for retrain # Read the test data to filter from train and dev splits. # Store english portion in set for quick filtering checks. en_test_sents = set() filter_test_sents = "test.en-any.en" j = 0 with open(filter_test_sents) as f: for line in f: en_test_sents.add(line.strip()) j += 1 print('Loaded {} global test sentences to filter from the training/dev data.'.format(j)) #TODO: Skip for retrain import pandas as pd # TMX file to dataframe source_file = 'jw300.' + source_language target_file = 'jw300.' + target_language source = [] target = [] skip_lines = [] # Collect the line numbers of the source portion to skip the same lines for the target portion. with open(source_file) as f: for i, line in enumerate(f): # Skip sentences that are contained in the test set. if line.strip() not in en_test_sents: source.append(line.strip()) else: skip_lines.append(i) with open(target_file) as f: for j, line in enumerate(f): # Only add to corpus if corresponding source was not skipped. if j not in skip_lines: target.append(line.strip()) print('Loaded data and skipped {}/{} lines since contained in test set.'.format(len(skip_lines), i)) df = pd.DataFrame(zip(source, target), columns=['source_sentence', 'target_sentence']) # if you get TypeError: data argument can't be an iterator is because of your zip version run this below #df = pd.DataFrame(list(zip(source, target)), columns=['source_sentence', 'target_sentence']) df.head(10) ``` ## Pre-processing and export It is generally a good idea to remove duplicate translations and conflicting translations from the corpus. In practice, these public corpora include some number of these that need to be cleaned. In addition we will split our data into dev/test/train and export to the filesystem. ``` #TODO: Skip for retrain # drop duplicate translations df_pp = df.drop_duplicates() # drop conflicting translations # (this is optional and something that you might want to comment out # depending on the size of your corpus) df_pp.drop_duplicates(subset='source_sentence', inplace=True) df_pp.drop_duplicates(subset='target_sentence', inplace=True) # Shuffle the data to remove bias in dev set selection. df_pp = df_pp.sample(frac=1, random_state=seed).reset_index(drop=True) #TODO: Skip for retrain # Install fuzzy wuzzy to remove "almost duplicate" sentences in the # test and training sets. ! pip install fuzzywuzzy ! pip install python-Levenshtein import time from fuzzywuzzy import process import numpy as np # reset the index of the training set after previous filtering df_pp.reset_index(drop=False, inplace=True) # Remove samples from the training data set if they "almost overlap" with the # samples in the test set. # Filtering function. Adjust pad to narrow down the candidate matches to # within a certain length of characters of the given sample. def fuzzfilter(sample, candidates, pad): candidates = [x for x in candidates if len(x) <= len(sample)+pad and len(x) >= len(sample)-pad] if len(candidates) > 0: return process.extractOne(sample, candidates)[1] else: return np.nan # NOTE - This might run slow depending on the size of your training set. We are # printing some information to help you track how long it would take. 
scores = [] start_time = time.time() for idx, row in df_pp.iterrows(): scores.append(fuzzfilter(row['source_sentence'], list(en_test_sents), 5)) if idx % 1000 == 0: hours, rem = divmod(time.time() - start_time, 3600) minutes, seconds = divmod(rem, 60) print("{:0>2}:{:0>2}:{:05.2f}".format(int(hours),int(minutes),seconds), "%0.2f percent complete" % (100.0*float(idx)/float(len(df_pp)))) # Filter out "almost overlapping samples" df_pp['scores'] = scores df_pp = df_pp[df_pp['scores'] < 95] #TODO: Skip for retrain # This section does the split between train/dev for the parallel corpora then saves them as separate files # We use 1000 dev test and the given test set. import csv # Do the split between dev/train and create parallel corpora num_dev_patterns = 1000 # Optional: lower case the corpora - this will make it easier to generalize, but without proper casing. if lc: # Julia: making lowercasing optional df_pp["source_sentence"] = df_pp["source_sentence"].str.lower() df_pp["target_sentence"] = df_pp["target_sentence"].str.lower() # Julia: test sets are already generated dev = df_pp.tail(num_dev_patterns) # Herman: Error in original stripped = df_pp.drop(df_pp.tail(num_dev_patterns).index) with open("train."+source_language, "w") as src_file, open("train."+target_language, "w") as trg_file: for index, row in stripped.iterrows(): src_file.write(row["source_sentence"]+"\n") trg_file.write(row["target_sentence"]+"\n") with open("dev."+source_language, "w") as src_file, open("dev."+target_language, "w") as trg_file: for index, row in dev.iterrows(): src_file.write(row["source_sentence"]+"\n") trg_file.write(row["target_sentence"]+"\n") #stripped[["source_sentence"]].to_csv("train."+source_language, header=False, index=False) # Herman: Added `header=False` everywhere #stripped[["target_sentence"]].to_csv("train."+target_language, header=False, index=False) # Julia: Problematic handling of quotation marks. #dev[["source_sentence"]].to_csv("dev."+source_language, header=False, index=False) #dev[["target_sentence"]].to_csv("dev."+target_language, header=False, index=False) # Doublecheck the format below. There should be no extra quotation marks or weird characters. ! head train.* ! head dev.* ``` --- ## Installation of JoeyNMT JoeyNMT is a simple, minimalist NMT package which is useful for learning and teaching. Check out the documentation for JoeyNMT [here](https://joeynmt.readthedocs.io) ``` # Install JoeyNMT ! git clone https://github.com/joeynmt/joeynmt.git ! cd joeynmt; pip3 install . ``` # Preprocessing the Data into Subword BPE Tokens - One of the most powerful improvements for agglutinative languages (a feature of most Bantu languages) is using BPE tokenization [ (Sennrich, 2015) ](https://arxiv.org/abs/1508.07909). - It was also shown that by optimizing the umber of BPE codes we significantly improve results for low-resourced languages [(Sennrich, 2019)](https://www.aclweb.org/anthology/P19-1021) [(Martinus, 2019)](https://arxiv.org/abs/1906.05685) - Below we have the scripts for doing BPE tokenization of our data. We use 4000 tokens as recommended by [(Sennrich, 2019)](https://www.aclweb.org/anthology/P19-1021). You do not need to change anything. Simply running the below will be suitable. ``` #TODO: Skip for retrain # One of the huge boosts in NMT performance was to use a different method of tokenizing. # Usually, NMT would tokenize by words. 
However, using a method called BPE gave amazing boosts to performance # Do subword NMT from os import path os.environ["src"] = source_language # Sets them in bash as well, since we often use bash scripts os.environ["tgt"] = target_language # Learn BPEs on the training data. os.environ["data_path"] = path.join("joeynmt", "data", source_language + target_language) # Herman! ! subword-nmt learn-joint-bpe-and-vocab --input train.$src train.$tgt -s 4000 -o bpe.codes.4000 --write-vocabulary vocab.$src vocab.$tgt # Apply BPE splits to the development and test data. ! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$src < train.$src > train.bpe.$src ! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$tgt < train.$tgt > train.bpe.$tgt ! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$src < dev.$src > dev.bpe.$src ! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$tgt < dev.$tgt > dev.bpe.$tgt ! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$src < test.$src > test.bpe.$src ! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$tgt < test.$tgt > test.bpe.$tgt # Create directory, move everyone we care about to the correct location ! mkdir -p $data_path ! cp train.* $data_path ! cp test.* $data_path ! cp dev.* $data_path ! cp bpe.codes.4000 $data_path ! ls $data_path # Also move everything we care about to a mounted location in google drive (relevant if running in colab) at gdrive_path ! cp train.* "$gdrive_path" ! cp test.* "$gdrive_path" ! cp dev.* "$gdrive_path" ! cp bpe.codes.4000 "$gdrive_path" ! ls "$gdrive_path" # Create that vocab using build_vocab ! sudo chmod 777 joeynmt/scripts/build_vocab.py ! joeynmt/scripts/build_vocab.py joeynmt/data/$src$tgt/train.bpe.$src joeynmt/data/$src$tgt/train.bpe.$tgt --output_path "$gdrive_path/vocab.txt" # Some output ! echo "BPE Nyanja Sentences" ! tail -n 5 test.bpe.$tgt ! echo "Combined BPE Vocab" ! tail -n 10 "$gdrive_path/vocab.txt" # Herman ``` # Creating the JoeyNMT Config JoeyNMT requires a yaml config. We provide a template below. We've also set a number of defaults with it, that you may play with! - We used Transformer architecture - We set our dropout to reasonably high: 0.3 (recommended in [(Sennrich, 2019)](https://www.aclweb.org/anthology/P19-1021)) Things worth playing with: - The batch size (also recommended to change for low-resourced languages) - The number of epochs (we've set it at 30 just so it runs in about an hour, for testing purposes) - The decoder options (beam_size, alpha) - Evaluation metrics (BLEU versus Crhf4) ``` def get_last_checkpoint(directory): last_checkpoint = '' try: for filename in os.listdir(directory): if 'best' in filename and filename.endswith(".ckpt"): return filename if not 'best' in filename and filename.endswith(".ckpt"): if not last_checkpoint or int(filename.split('.')[0]) > int(last_checkpoint.split('.')[0]): last_checkpoint = filename except FileNotFoundError as e: print('Error Occur ', e) return last_checkpoint # Copy the created models from the temporary storage to main storage on google drive for persistant storage # the content of te folder will be overwrite when you start trainin !cp -r "/content/drive/My Drive/masakhane/model-temp/"* "$gdrive_path/models/${src}${tgt}_transformer/" last_checkpoint = get_last_checkpoint(models_path) print('Last checkpoint :',last_checkpoint) # This creates the config file for our JoeyNMT system. 
It might seem overwhelming so we've provided a couple of useful parameters you'll need to update # (You can of course play with all the parameters if you'd like!) name = '%s%s' % (source_language, target_language) gdrive_path = os.environ["gdrive_path"] # Create the config config = """ name: "{name}_transformer" data: src: "{source_language}" trg: "{target_language}" train: "{gdrive_path}/train.bpe" dev: "{gdrive_path}/dev.bpe" test: "{gdrive_path}/test.bpe" level: "bpe" lowercase: False max_sent_length: 100 src_vocab: "{gdrive_path}/vocab.txt" trg_vocab: "{gdrive_path}/vocab.txt" testing: beam_size: 5 alpha: 1.0 training: #load_model: "{gdrive_path}/models/{name}_transformer/{last_checkpoint}" # TODO: uncommented to load a pre-trained model from last checkpoint random_seed: 42 optimizer: "adam" normalization: "tokens" adam_betas: [0.9, 0.999] scheduling: "plateau" # TODO: try switching from plateau to Noam scheduling patience: 5 # For plateau: decrease learning rate by decrease_factor if validation score has not improved for this many validation rounds. learning_rate_factor: 0.5 # factor for Noam scheduler (used with Transformer) learning_rate_warmup: 1000 # warmup steps for Noam scheduler (used with Transformer) decrease_factor: 0.7 loss: "crossentropy" learning_rate: 0.0003 learning_rate_min: 0.00000001 weight_decay: 0.0 label_smoothing: 0.1 batch_size: 4096 batch_type: "token" eval_batch_size: 3600 eval_batch_type: "token" batch_multiplier: 1 early_stopping_metric: "ppl" epochs: 50 # TODO: Decrease for when playing around and checking of working. Around 30 is sufficient to check if its working at all validation_freq: 1000 # TODO: Set to at least once per epoch. logging_freq: 100 eval_metric: "bleu" model_dir: "{model_temp_dir}" overwrite: True # TODO: Set to True if you want to overwrite possibly existing models. shuffle: True use_cuda: True max_output_length: 100 print_valid_sents: [0, 1, 2, 3] keep_last_ckpts: 3 model: initializer: "xavier" bias_initializer: "zeros" init_gain: 1.0 embed_initializer: "xavier" embed_init_gain: 1.0 tied_embeddings: True tied_softmax: True encoder: type: "transformer" num_layers: 6 num_heads: 4 # TODO: Increase to 8 for larger data. embeddings: embedding_dim: 256 # TODO: Increase to 512 for larger data. scale: True dropout: 0.2 # typically ff_size = 4 x hidden_size hidden_size: 256 # TODO: Increase to 512 for larger data. ff_size: 1024 # TODO: Increase to 2048 for larger data. dropout: 0.3 decoder: type: "transformer" num_layers: 6 num_heads: 4 # TODO: Increase to 8 for larger data. embeddings: embedding_dim: 256 # TODO: Increase to 512 for larger data. scale: True dropout: 0.2 # typically ff_size = 4 x hidden_size hidden_size: 256 # TODO: Increase to 512 for larger data. ff_size: 1024 # TODO: Increase to 2048 for larger data. dropout: 0.3 """.format(name=name, gdrive_path=os.environ["gdrive_path"], source_language=source_language, target_language=target_language, model_temp_dir=model_temp_dir, last_checkpoint=last_checkpoint) with open("joeynmt/configs/transformer_{name}.yaml".format(name=name),'w') as f: f.write(config) ``` # Train the Model This single line of joeynmt runs the training using the config we made above ``` # Train the model # You can press Ctrl-C to stop. And then run the next cell to save your checkpoints! 
!cd joeynmt; python3 -m joeynmt train configs/transformer_$src$tgt.yaml # Copy the created models from the temporary storage to main storage on google drive for persistant storage !cp -r "/content/drive/My Drive/masakhane/model-temp/"* "$gdrive_path/models/${src}${tgt}_transformer/" # Output our validation accuracy ! cat "$gdrive_path/models/${src}${tgt}_transformer/validations.txt" # Test our model ! cd joeynmt; python3 -m joeynmt test "$gdrive_path/models/${src}${tgt}_transformer/config.yaml" ```
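As an optional final check, you may want to score the test-set translations yourself, outside of JoeyNMT's own reporting. The sketch below is a hedged example only: it assumes the decoded hypotheses have been saved to a plain-text file (the name `hypotheses.test` is a placeholder - point it at whatever file your JoeyNMT run produced), removes the subword-nmt BPE separators (`@@ ` by default), and computes corpus BLEU with `sacrebleu` (install it with `pip install sacrebleu` if it is not already present).

```
# Optional post-processing / scoring sketch -- file names are placeholders.
import sacrebleu

hyp_file = "hypotheses.test"          # placeholder: path to your decoded test hypotheses
ref_file = "test." + target_language  # reference translations created earlier in this notebook

def debpe(line):
    # Undo subword-nmt BPE segmentation (default separator is '@@ ')
    return line.replace("@@ ", "").replace("@@", "").strip()

with open(hyp_file) as f:
    hyps = [debpe(line) for line in f]
with open(ref_file) as f:
    refs = [line.strip() for line in f]

bleu = sacrebleu.corpus_bleu(hyps, [refs])
print("BLEU: %.2f" % bleu.score)
```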
github_jupyter
# Evaluate the Performance of MPNN models Get all of the models, regardless how we trained them and evaluate their performance ``` %matplotlib inline from matplotlib import pyplot as plt from datetime import datetime from sklearn import metrics from tqdm import tqdm from glob import glob import pandas as pd import numpy as np import json import os ``` ## Find the Models and Summarize Them There are `best_model.h5` files in subdirectories that contain data on their configuration. ``` models = glob(os.path.join('**', 'test_predictions.csv'), recursive=True) print(f'Found {len(models)} models') def generate_summary(path): """Generate the summary of a model, given path to its output Args: path (str): Path ot the trained weights Returns: (dict) Model information """ # Store the directory first dir_name = os.path.dirname(path) output = {'path': dir_name} # Get the host and run parameters for f in ['host_info.json', 'run_params.json']: with open(os.path.join(dir_name, f)) as fp: output.update(json.load(fp)) # Compute the number of nodes output['n_nodes'] = output['total_ranks'] // output['ranks_per_node'] \ if 'total_ranks' in output else 1 # Convert the start time to a datetime output['start_time'] = datetime.fromisoformat(output['start_time']) # Get the log infomration log_file = os.path.join(dir_name, 'log.csv') log = pd.read_csv(log_file) output['completed_epochs'] = len(log) output['val_loss'] = log['val_loss'].min() output['loss'] = log['loss'].min() output['epoch_time'] = np.percentile(log['epoch_time'], 50) output['total_train_time'] = log['epoch_time'].sum() output['total_node_hours'] = output['total_train_time'] * output['n_nodes'] # Compute performance on hold-out set results = pd.read_csv(os.path.join(output['path'], 'test_predictions.csv')) for m in ['r2_score', 'mean_squared_error', 'mean_absolute_error', 'median_absolute_error']: v = getattr(metrics, m)(results['y_true'], results['y_pred']) output[m] = v return output model_info = pd.DataFrame([generate_summary(m) for m in models]) print(f'Found {len(model_info)} models') ``` ## Print out Best Performer We are going to pick the one that has the best performance on the test set ### Coarse Network See how we did on the "node per water" network ``` model = model_info.query('network_choice=="coarse"').sort_values('mean_absolute_error').iloc[0] print(f'Model being evaluated: {model["path"]}') model[['path', 'network_choice', 'activation', 'message_steps', 'dropout', 'features', 'batch_size']] model[['loss', 'val_loss', 'mean_squared_error']] ``` Plot the logs ``` log = pd.read_csv(os.path.join(model['path'], 'log.csv')) fig, ax = plt.subplots(figsize=(3.5, 2.5)) ax.semilogy(log['epoch'], log['loss'], label='Train') ax.semilogy(log['epoch'], log['val_loss'], label='Validation') ax.legend() ax.set_xlabel('Epoch') ax.set_ylabel('Loss') ``` *Finding*: Huge variance in validation loss is indicative of overfitting Plot the performance on the test set ``` results = pd.read_csv(os.path.join(model['path'], 'test_predictions.csv')) for m in ['r2_score', 'mean_squared_error', 'mean_absolute_error']: v = getattr(metrics, m)(results['y_true'], results['y_pred']) print(f'{m}: {v: .2f}') ``` Plot the true vs predicted ``` fig, ax = plt.subplots() ax.scatter(results['y_true'], results['y_pred'], s=0.5, alpha=0.2) ax.plot(ax.get_xlim(), ax.get_ylim(), 'k--') ax.set_xlabel('$E$, True') ax.set_ylabel('$E$, ML') fig.set_size_inches(3.5, 3.5) ``` Plot only the largest cluster size ``` subset = results.query(f'n_waters == {results["n_waters"].max()}') 
print(f'Scores for the {len(subset)} largest molecules with {results["n_waters"].max()} waters') for m in ['r2_score', 'mean_squared_error', 'mean_absolute_error']: v = getattr(metrics, m)(subset['y_true'], subset['y_pred']) print(f'{m}: {v: .2f}') fig, ax = plt.subplots() errors = subset['y_pred'] - subset['y_true'] bins = np.linspace(-10, 10, 256) ax.hist(errors, bins=bins, density=False) ax.set_xlabel('Error (kcal/mol)') ax.set_ylabel('Frequency') fig.set_size_inches(3.5, 2) fig, ax = plt.subplots(figsize=(3.5, 3.5)) ax.scatter(subset['y_true'], subset['y_pred'], s=0.5, alpha=0.1) ax.set_ylim(-340, -305) ax.set_xlim(ax.get_ylim()) ax.set_ylim(ax.get_xlim()) ax.plot(ax.get_xlim(), ax.get_xlim(), 'k--') ax.set_xlabel('$E$ (kcal/mol), True') ax.set_ylabel('$E$ (kcal/mol), ML') fig.tight_layout() ``` ### Atomic Network See how we did for the "node per atom" network ``` model = model_info.query('network_choice=="atomic"').sort_values('mean_absolute_error').iloc[0] print(f'Model being evaluated: {model["path"]}') model[['path', 'network_choice', 'activation', 'message_steps', 'dropout', 'features', 'batch_size']] model[['loss', 'val_loss', 'mean_squared_error']] ``` Plot the logs ``` log = pd.read_csv(os.path.join(model['path'], 'log.csv')) fig, ax = plt.subplots() ax.semilogy(log['epoch'], log['loss'], label='Train') ax.semilogy(log['epoch'], log['val_loss'], label='Validation') ax.legend() ax.set_xlabel('Epoch') ax.set_ylabel('Loss') ``` *Finding*: Huge variance in validation loss is indicative of overfitting Plot the performance on the test set ``` results = pd.read_csv(os.path.join(model['path'], 'test_predictions.csv')) for m in ['r2_score', 'mean_squared_error', 'mean_absolute_error']: v = getattr(metrics, m)(results['y_true'], results['y_pred']) print(f'{m}: {v: .2f}') ``` Plot the true vs predicted ``` fig, ax = plt.subplots(figsize=(3.5, 3.5)) ax.set_title('Performance on hold-out set') ax.scatter(results['y_true'], results['y_pred'], s=0.5, alpha=0.2) ax.plot(ax.get_xlim(), ax.get_ylim(), 'k--') ax.set_xlabel('$E$, True') ax.set_ylabel('$E$, ML') fig.set_size_inches(3.5, 3.5) ``` Plot only the largest cluster size ``` subset = results.query(f'n_waters == {results["n_waters"].max()}') print(f'Scores for the {len(subset)} largest molecules with {results["n_waters"].max()} waters') for m in ['r2_score', 'mean_squared_error', 'mean_absolute_error']: v = getattr(metrics, m)(subset['y_true'], subset['y_pred']) print(f'{m}: {v: .2f}') fig, ax = plt.subplots() errors = subset['y_pred'] - subset['y_true'] bins = np.linspace(-10, 10, 256) ax.hist(errors, bins=bins, density=False) ax.set_xlabel('Error (kcal/mol)') ax.set_ylabel('Frequency') fig.set_size_inches(3.5, 2) fig, ax = plt.subplots(figsize=(3.5, 3.5)) ax.set_title('Clusters with 30 waters') ax.scatter(subset['y_true'], subset['y_pred'], s=0.5, alpha=0.1) ax.set_ylim(-340, -305) ax.set_xlim(ax.get_ylim()) ax.set_ylim(ax.get_xlim()) ax.plot(ax.get_xlim(), ax.get_xlim(), 'k--') ax.set_xlabel('$E$ (kcal/mol), True') ax.set_ylabel('$E$ (kcal/mol), ML') fig.tight_layout() ``` Make a publication-ready figure ``` fig, axs = plt.subplots(1, 3, figsize=(6.5, 2.5)) # Predicted vs actual plots n_waters = results["n_waters"].max() subset = results.query(f'n_waters == {n_waters}') for d, ax, title in zip([results, subset], axs, ['Full Dataset', '30-Water Clusters']): ax.set_title(title) ax.scatter(d['y_true'], d['y_pred'], s=0.7, alpha=0.2, edgecolor='none') max_ = max(ax.get_xlim()[1], ax.get_ylim()[1]) min_ = min(ax.get_xlim()[0], 
ax.get_ylim()[0]) ax.set_xlim([min_, max_]) ax.set_ylim(ax.get_xlim()) ax.plot(ax.get_xlim(), ax.get_xlim(), 'k--') ax.set_xlabel('$E$ (kcal/mol), True') ax.set_ylabel('$E$ (kcal/mol), ML') mae = metrics.mean_absolute_error(d['y_true'], d['y_pred']) r2 = metrics.r2_score(d['y_true'], d['y_pred']) ax.text(0.99, 0, f'MAE: {mae:.2f}\n$R^2$: {r2:.2f}', ha='right', va='bottom', transform=ax.transAxes, fontsize=10) # Box and wisker plot ax = axs[2] error_stats = [] for s, subset in results.groupby('n_waters'): error = np.abs(subset['y_pred'] - subset['y_true']) / s error_stats.append({'size': s, 'mae': error.mean()}) error_stats = pd.DataFrame(error_stats) ax.plot(error_stats['size'], error_stats['mae'], '--o', ms=3) ax.set_xlabel('# Waters') ax.set_ylabel('MAE (kcal/mol/water)') # Add figure labels for ax, l in zip(axs[:2], ['a', 'b']): ax.text(0.02, 0.9, f'({l})', transform=ax.transAxes) axs[2].text(0.82, 0.9, '(c)', transform=axs[2].transAxes) fig.tight_layout() fig.savefig(os.path.join('figures', 'mpnn-performance.png'), dpi=320) ``` ## Make the Box Plot To match Jenna's ``` results['abs_error_per_water'] = np.abs(results['y_true'] - results['y_pred']) / results['n_waters'] def make_box_plot(df, metric='abs_error_per_water'): boxplot = df.query('n_waters >= 10 and n_waters <= 30').boxplot(metric, 'n_waters', grid=False, fontsize=20, figsize=(12,6), return_type='both') plt.ylim(-0.01,0.7) plt.ylabel('Absolute Error\n(kcal/mol/water)', fontsize=22, fontweight='bold', labelpad=15) plt.xlabel('Cluster Size', fontsize=22, fontweight='bold', labelpad=15) plt.xticks(range(1,23,2), ['10','12','14','16','18','20','22','24','26','28','30']) plt.xlim(0, 22) plt.suptitle('') plt.title('') plt.tight_layout() plt.savefig('figures/mpnn_boxplot-horz.png',dpi=600) make_box_plot(results) ``` ## Evaluate Hyperparameter Sweeps We did some manual hyperparameter tuning for the atomic model ### Batch Sizes Evaluate different batch sizes to get a tradeoff between accuracy and using the full GPU ``` base_query = ('epochs==32 and shuffle_buffer_size==2097152 and activation=="sigmoid" ' 'and message_steps==4 and network_choice=="atomic" and dropout==0 and features==64') model_info.query(base_query).sort_values('val_loss')[['batch_size', 'loss', 'val_loss', 'mean_squared_error', 'epoch_time']] ``` *Finding*: We get decent accuracy with a batch size of 1024 and still use 90% of the GPU ### Activation Function We evaluated different activation functions for the message steps ``` base_query = ('batch_size==1024 and epochs==32 and shuffle_buffer_size==2097152 ' 'and message_steps==4 and network_choice=="atomic" and dropout==0 and features==64') model_info.query(base_query).sort_values('mean_squared_error')[['activation', 'loss', 'val_loss', 'mean_squared_error', 'epoch_time']] ``` *Finding*: We should go with the softplus. 
Fastest and most accurate ### Number of Message Passing Layers We compared increasing the number of message passing layers ``` base_query = ('hostname=="lambda3" and shuffle_buffer_size==2097152 and batch_size==1024 and activation=="softplus" and epochs==32 ' 'and network_choice=="atomic"') model_info.query(base_query).sort_values('message_steps')[['network_choice', 'message_steps', 'loss', 'val_loss', 'mean_squared_error', 'epoch_time']] fig, ax = plt.subplots() for label, subset in model_info.query(base_query).sort_values('message_steps').groupby('network_choice'): ax.plot(subset['message_steps'], subset['mean_absolute_error'], '-o', label=label) ax.set_xscale('log', base=2) ax.set_xlabel('Message Steps') ax.set_ylabel('Mean Absolute Error') ax.legend() ``` *Finding*: We need many message passing layers, which can get expensive
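To make that accuracy/cost trade-off easier to eyeball, a small optional summary table can be built from the same `model_info` frame used throughout this notebook; it relies only on columns already referenced above (`message_steps`, `mean_absolute_error`, `epoch_time`) and the `base_query` defined for this sweep.

```
# Summarize the message-passing sweep: accuracy vs. per-epoch cost
summary = (
    model_info
    .query(base_query)
    .groupby('message_steps')[['mean_absolute_error', 'epoch_time']]
    .mean()
    .sort_values('mean_absolute_error')
)
summary
```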
github_jupyter
# Multi-center analysis ### Imports ``` import sys sys.path.append('../') from PAINTeR import connectivity # in-house lib used for the RPN-signature from PAINTeR import plot # in-house lib used for the RPN-signature from PAINTeR import model # in-house lib used for the RPN-signature import numpy as np # hi old friend import pandas as pd from sklearn.preprocessing import StandardScaler from nilearn.connectome import ConnectivityMeasure from matplotlib.colors import ListedColormap from matplotlib.colors import Normalize import matplotlib.pyplot as plt import seaborn as sns sns.set_style("white") from sklearn.linear_model import ElasticNet, Ridge from sklearn.feature_selection import SelectKBest, f_regression from sklearn import preprocessing from sklearn.pipeline import Pipeline from sklearn.model_selection import LeaveOneOut, KFold, GroupKFold, LeavePGroupsOut from sklearn.metrics import mean_squared_error, mean_absolute_error, median_absolute_error, r2_score, explained_variance_score from sklearn.model_selection import GridSearchCV from sklearn.model_selection import cross_val_predict from sklearn.model_selection import cross_validate ``` ### Processing parameters ``` thres_mean_FD = 0.15 # mm scrub_threshold = 0.15 # mm thres_perc_scrub = 30 # % scubbed out ``` ### Load all behavioral data ``` # load bochum data df_bochum = pd.read_csv("../res/bochum_sample_excl.csv") df_essen = pd.read_csv("../res/essen_sample_excl.csv") df_szeged = pd.read_csv("../res/szeged_sample_excl.csv") df_bochum['study']='bochum' df_essen['study']='essen' df_szeged['study']='szeged' df=pd.concat((df_bochum, df_essen, df_szeged), sort=False) df=df.reset_index() df.groupby('study').hist('mean_QST_pain_sensitivity', bins=6) ``` ### Load standardized scrubbed timeseries ``` timeseries = [] perc_scrubbed = [] for i, f in enumerate(df['ts_file']): f = '..' + f.split('/..')[1] f_scrub = f.split('.tsv')[0] + '-scrubbed.tsv' ts = pd.read_csv(f_scrub).iloc[:,1:] # here we can omit global signal... fd_file = df["fd_file"].values[i] fd_file = '..' 
+ fd_file.split('/..')[1] fd = pd.read_csv(fd_file).values.ravel().tolist() fd = [0] + fd perc_scrubbed.append(100 - 100*len(ts.shape)/len(fd) ) timeseries.append(ts.values) #region names labels=ts.columns.values l = pd.read_csv('../data/atlas_relabeled.tsv', sep="\t") modules=np.insert(l['modules'].values, 0, "GlobSig") # plot a specific timeseries sub_idx=10 pd.DataFrame(timeseries[sub_idx], columns=ts.columns.values).loc[:, ['AINS_pd', 'AINS_v', 'PINS_v']].plot() ``` ### Calculate connectivity ``` correlation_measure = ConnectivityMeasure(kind='partial correlation', vectorize=True, discard_diagonal=True) X = correlation_measure.fit_transform(timeseries) # these are the features mat=correlation_measure.mean_ #mat=mat[1:, 1:] #fisrt row and column is global signal mat[range(mat.shape[0]), range(mat.shape[0])] = 0 # zero diag # 3d plot in browser window #coords = plotting.find_parcellation_cut_coords("../data/atlas_relabeled.nii.gz") #view = plotting.view_connectome(mat, coords) #view.open_in_browser() plot.plot_matrix(mat, labels, modules) y = df.mean_QST_pain_sensitivity sns.distplot(y[df.study=='bochum'], hist=False, rug=True) sns.distplot(y[df.study=='essen'], hist=False, rug=True) sns.distplot(y[df.study=='szeged'], hist=False, rug=True) print(X.shape, len(y)) ``` ### Group data to get balanced splits in a 30-fold cross-validation ``` plt.figure(figsize=(12, 0.3)) sns.heatmap([df.study.astype("category").cat.codes.values]).set_title('study center') plt.show() n_szeged = np.sum(df.study == 'szeged') # size of the smallest study n_essen = np.sum(df.study == 'essen') n_bochum = np.sum(df.study == 'bochum') print(n_bochum, n_essen, n_szeged) groups=np.zeros(len(df), dtype=int) g=0 i=0 while i < n_bochum: groups[i] = g #groups[i+1] = g i += 1 g += 1 g=0 i=n_bochum while i < n_bochum+n_essen: groups[i] = g #groups[i+1] = g i += 1 g += 1 g=0 i=n_bochum+n_essen while i < len(df): groups[i] = g i += 1 g += 1 plt.figure(figsize=(12, 0.3)) sns.heatmap([groups]).set_title('groups') plt.show() groups ``` ## Model training - non nested ``` def pipe_scale_fsel_elnet(scaler=preprocessing.RobustScaler(), fsel=SelectKBest(f_regression), model=ElasticNet(max_iter=100000), p_grid = {'fsel__k': [25, 50, 100, 1000, 3000, 'all'], 'model__alpha': [ 0.001, 0.01, 0.1, 1, 10], 'model__l1_ratio': [0.0001, .25, .5, .75, 0.9999] }): mymodel = Pipeline( [('scaler', scaler), ('fsel', fsel), ('model', model)]) return mymodel, p_grid model, p_grid = pipe_scale_fsel_elnet() cv = GroupKFold(30) clf = GridSearchCV(estimator=model, param_grid=p_grid, cv=cv, scoring="neg_mean_squared_error", verbose=True, return_train_score=False, n_jobs=-1) clf.fit(X, y, groups=groups) print("**** Non-nested analysis ****") print("** Best hyperparameters: " + str(clf.best_params_)) print("** Score on full data as training set:\t" + str(-mean_squared_error(y_pred=clf.best_estimator_.predict(X), y_true=y))) print("** Score on mean as model: " + str(-mean_squared_error(np.repeat(y.mean(), len(y)), y))) print("** Best Non-nested cross-validated score on test:\t" + str(clf.best_score_)) print("XXXXX Explained Variance: " + str( 1 - clf.best_score_ / -mean_squared_error(np.repeat(y.mean(), len(y)), y))) cv_pred = cross_val_predict(clf.best_estimator_, X, y, cv=cv, groups=groups, n_jobs=-1) plot.plot_prediction(y, predicted, sd=True, covar=[]) #for train_index, test_index in group_kfold.split(X, y, groups): # #print("TRAIN:", train_index, "TEST:", test_index) # #print(df.study[train_index].values) # print('test:', df.study[test_index].values) 
``` ## Model training - nested ``` def pipe_scale_fsel_elnet(scaler=preprocessing.RobustScaler(), fsel=SelectKBest(f_regression), model=ElasticNet(max_iter=100000), p_grid = {'fsel__k': [25, 2000, 4000, 6000], 'model__alpha': [ 0.001, 0.01, 0.1, 1], 'model__l1_ratio': [0.0001, .25, .5, .75, 0.9999] }): mymodel = Pipeline( [('scaler', scaler), ('fsel', fsel), ('model', model)]) return mymodel, p_grid model, p_grid = pipe_scale_fsel_elnet() cv = GroupKFold(30) clf = GridSearchCV(estimator=model, param_grid=p_grid, cv=cv, scoring="neg_mean_squared_error", verbose=True, return_train_score=False, n_jobs=-1) clf.fit(X, y, groups=groups) print("**** Non-nested analysis ****") print("** Best hyperparameters: " + str(clf.best_params_)) print("** Score on full data as training set:\t" + str(-mean_squared_error(y_pred=clf.best_estimator_.predict(X), y_true=y))) print("** Score on mean as model: " + str(-mean_squared_error(np.repeat(y.mean(), len(y)), y))) print("** Best Non-nested cross-validated score on test:\t" + str(clf.best_score_)) print("XXXXX Explained Variance: " + str( 1 - clf.best_score_ / -mean_squared_error(np.repeat(y.mean(), len(y)), y))) cv_pred = cross_val_predict(clf.best_estimator_, X, y, cv=cv, groups=groups, n_jobs=-1) plot.plot_prediction(y, predicted, sd=True, covar=[]) #for train_index, test_index in group_kfold.split(X, y, groups): # #print("TRAIN:", train_index, "TEST:", test_index) # #print(df.study[train_index].values) # print('test:', df.study[test_index].values) def pipe_scale_fsel_elnet(scaler=preprocessing.RobustScaler(), fsel=SelectKBest(f_regression), model=ElasticNet(max_iter=100000), p_grid = {'fsel__k': [10, 50, 100, 200, 500, 700, 1000, 2000, 3000, 4000, 5000, 'all'], 'model__alpha': [.001, .01, .1, 1, 10], 'model__l1_ratio': [0.001, .1, .3, .5, .7, .9, .999] #p_grid = {'fsel__k': [1000, 2000, 5000], 'model__alpha': [.001, .005, .01, .05, .1], 'model__l1_ratio': [.999] }): mymodel = Pipeline( [('scaler', scaler), ('fsel', fsel), ('model', model)]) return mymodel, p_grid model, p_grid = pipe_scale_fsel_elnet() outer_cv = GroupKFold(30) inner_cv = GroupKFold(30) clf = GridSearchCV(estimator=model, param_grid=p_grid, cv=inner_cv, scoring="neg_mean_squared_error", verbose=True, return_train_score=False, n_jobs=-1) all_models = [] best_params = [] predicted = np.zeros(len(y)) nested_scores_train = np.zeros(outer_cv.get_n_splits(X)) nested_scores_test = np.zeros(outer_cv.get_n_splits(X)) print("model\tinner_cv mean score\touter vc score") i=0 for train, test in outer_cv.split(X, y, groups=groups): group_train = groups[train] clf.fit(X[train], y[train], groups=group_train) print(str(clf.best_params_) + " " + str(clf.best_score_) + " " + str(clf.score(X[test], y[test]))) all_models.append(clf.best_estimator_) best_params.append(clf.best_params_) predicted[test] = clf.predict(X[test]) nested_scores_train[i] = clf.best_score_ nested_scores_test[i] = clf.score(X[test], y[test]) i = i+1 print("*** Score on mean as model:\t" + str(-mean_squared_error(np.repeat(y.mean(), len(y)), y))) print("** Mean score in the inner crossvaludation (inner_cv):\t" + str(nested_scores_train.mean())) print("** Mean Nested Crossvalidation Score (outer_cv):\t" + str(nested_scores_test.mean())) print("Explained Variance: " + str( 1- nested_scores_test.mean()/-mean_squared_error(np.repeat(y.mean(), len(y)), y) )) print("Correlation: " + str(np.corrcoef(y, predicted)[0,1])) plot.plot_prediction(y, predicted, sd=True, covar=[]) print("*** Score on mean as model:\t" + 
str(-mean_squared_error(np.repeat(y.mean(), len(y)), y))) print("** Mean score in the inner crossvaludation (inner_cv):\t" + str(nested_scores_train.mean())) print("** Mean Nested Crossvalidation Score (outer_cv):\t" + str(nested_scores_test.mean())) print("Explained Variance: " + str( 1- nested_scores_test.mean()/-mean_squared_error(np.repeat(y.mean(), len(y)), y) )) print("Correlation: " + str(np.corrcoef(y, predicted)[0,1])) plot.plot_prediction(y, predicted, sd=True, covar=[]) ``` ## Finalize and save model ## Obtain predictive network and compare to the RPN-signature
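No code was included for this last step, so here is a minimal, hedged sketch of one way to pull out the predictive network from the fitted pipeline. It assumes the `clf` GridSearchCV object from the training cells above; because its `best_estimator_` was last fitted inside cross-validation, the sketch refits it on the pooled data before reading out the weights. Mapping the surviving feature indices back to region pairs (and the actual comparison to the RPN-signature) is not covered here, since it depends on the vectorization order used by `ConnectivityMeasure`.

```
# Minimal sketch: inspect the predictive connections of the fitted pipeline
import numpy as np

best_pipe = clf.best_estimator_.fit(X, y)   # refit the winning pipeline on the pooled data
selected_idx = best_pipe.named_steps['fsel'].get_support(indices=True)
coefs = best_pipe.named_steps['model'].coef_

nonzero = selected_idx[np.abs(coefs) > 1e-8]
print("Features kept by SelectKBest:", len(selected_idx))
print("Features with non-zero ElasticNet weight:", len(nonzero))
```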
github_jupyter
(tune-mnist-keras)= # Using Keras & TensorFlow with Tune ```{image} /images/tf_keras_logo.jpeg :align: center :alt: Keras & TensorFlow Logo :height: 120px :target: https://www.keras.io ``` ```{contents} :backlinks: none :local: true ``` ## Example ``` import argparse import os from filelock import FileLock from tensorflow.keras.datasets import mnist import ray from ray import tune from ray.tune.schedulers import AsyncHyperBandScheduler from ray.tune.integration.keras import TuneReportCallback def train_mnist(config): # https://github.com/tensorflow/tensorflow/issues/32159 import tensorflow as tf batch_size = 128 num_classes = 10 epochs = 12 with FileLock(os.path.expanduser("~/.data.lock")): (x_train, y_train), (x_test, y_test) = mnist.load_data() x_train, x_test = x_train / 255.0, x_test / 255.0 model = tf.keras.models.Sequential( [ tf.keras.layers.Flatten(input_shape=(28, 28)), tf.keras.layers.Dense(config["hidden"], activation="relu"), tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(num_classes, activation="softmax"), ] ) model.compile( loss="sparse_categorical_crossentropy", optimizer=tf.keras.optimizers.SGD(lr=config["lr"], momentum=config["momentum"]), metrics=["accuracy"], ) model.fit( x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=0, validation_data=(x_test, y_test), callbacks=[TuneReportCallback({"mean_accuracy": "accuracy"})], ) def tune_mnist(num_training_iterations): sched = AsyncHyperBandScheduler( time_attr="training_iteration", max_t=400, grace_period=20 ) analysis = tune.run( train_mnist, name="exp", scheduler=sched, metric="mean_accuracy", mode="max", stop={"mean_accuracy": 0.99, "training_iteration": num_training_iterations}, num_samples=10, resources_per_trial={"cpu": 2, "gpu": 0}, config={ "threads": 2, "lr": tune.uniform(0.001, 0.1), "momentum": tune.uniform(0.1, 0.9), "hidden": tune.randint(32, 512), }, ) print("Best hyperparameters found were: ", analysis.best_config) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--smoke-test", action="store_true", help="Finish quickly for testing" ) parser.add_argument( "--server-address", type=str, default=None, required=False, help="The address of server to connect to if using " "Ray Client.", ) args, _ = parser.parse_known_args() if args.smoke_test: ray.init(num_cpus=4) elif args.server_address: ray.init(f"ray://{args.server_address}") tune_mnist(num_training_iterations=5 if args.smoke_test else 300) ``` ## More Keras and TensorFlow Examples - {doc}`/tune/examples/includes/pbt_memnn_example`: Example of training a Memory NN on bAbI with Keras using PBT. - {doc}`/tune/examples/includes/tf_mnist_example`: Converts the Advanced TF2.0 MNIST example to use Tune with the Trainable. This uses `tf.function`. Original code from tensorflow: https://www.tensorflow.org/tutorials/quickstart/advanced - {doc}`/tune/examples/includes/pbt_tune_cifar10_with_keras`: A contributed example of tuning a Keras model on CIFAR10 with the PopulationBasedTraining scheduler.
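Going back to the MNIST example above: once `tune_mnist` has finished you will usually want a final model trained with the winning hyperparameters outside of Tune. The sketch below is an optional illustration of that step; it assumes you have access to the `analysis` object returned by `tune.run` (in the script above it is local to `tune_mnist`, so you would need to return it), and `train_final_model` is a helper name introduced only here. It simply rebuilds the same Keras architecture with `analysis.best_config`.

```
import tensorflow as tf
from tensorflow.keras.datasets import mnist

def train_final_model(best_config, epochs=12):
    # Rebuild the architecture searched above with the best hyperparameters
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train, x_test = x_train / 255.0, x_test / 255.0

    model = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(best_config["hidden"], activation="relu"),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(10, activation="softmax"),
    ])
    model.compile(
        loss="sparse_categorical_crossentropy",
        optimizer=tf.keras.optimizers.SGD(
            learning_rate=best_config["lr"], momentum=best_config["momentum"]
        ),
        metrics=["accuracy"],
    )
    model.fit(x_train, y_train, epochs=epochs, validation_data=(x_test, y_test))
    return model

# Example usage (assuming you captured the analysis object):
# final_model = train_final_model(analysis.best_config)
```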
github_jupyter
An example showing how different online solvers perform on the hand-written digits dataset. #### New to Plotly? Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/). <br>You can set up Plotly to work in [online](https://plot.ly/python/getting-started/#initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/#start-plotting-online). <br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started! ### Version ``` import sklearn sklearn.__version__ ``` ### Imports This tutorial imports [train_test_split](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html#sklearn.model_selection.train_test_split), [SGDClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDClassifier.html#sklearn.linear_model.SGDClassifier), [Perceptron](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Perceptron.html#sklearn.linear_model.Perceptron), [PassiveAggressiveClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.PassiveAggressiveClassifier.html#sklearn.linear_model.PassiveAggressiveClassifier) and [LogisticRegression](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html#sklearn.linear_model.LogisticRegression). ``` import plotly.plotly as py import plotly.graph_objs as go import numpy as np from sklearn import datasets from sklearn.model_selection import train_test_split from sklearn.linear_model import SGDClassifier, Perceptron from sklearn.linear_model import PassiveAggressiveClassifier from sklearn.linear_model import LogisticRegression ``` ### Calculations ``` heldout = [0.95, 0.90, 0.75, 0.50, 0.01] rounds = 20 digits = datasets.load_digits() X, y = digits.data, digits.target classifiers = [ ("SGD", SGDClassifier()), ("ASGD", SGDClassifier(average=True)), ("Perceptron", Perceptron()), ("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge', C=1.0)), ("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge', C=1.0)), ("SAG", LogisticRegression(solver='sag', tol=1e-1, C=1.e4 / X.shape[0])) ] xx = 1. - np.array(heldout) ``` ### Plot Results ``` data = [] for name, clf in classifiers: print("training %s" % name) rng = np.random.RandomState(42) yy = [] for i in heldout: yy_ = [] for r in range(rounds): X_train, X_test, y_train, y_test = \ train_test_split(X, y, test_size=i, random_state=rng) clf.fit(X_train, y_train) y_pred = clf.predict(X_test) yy_.append(1 - np.mean(y_pred == y_test)) yy.append(np.mean(yy_)) trace = go.Scatter(x=xx, y=yy, mode='lines', name=name) data.append(trace) layout = go.Layout(xaxis=dict(title="Proportion train"), yaxis=dict(title="Test Error Rate") ) fig = go.Figure(data=data, layout=layout) py.iplot(fig) ``` ### License Author: Rob Zinkov <[email protected]> License: BSD 3 clause ``` from IPython.display import display, HTML display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />')) display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">')) ! 
pip install git+https://github.com/plotly/publisher.git --upgrade import publisher publisher.publish( 'Comparing Various Online Solvers.ipynb', 'scikit-learn/plot-sgd-comparison/', 'Comparing Various Online Solvers | plotly', ' ', title = 'Comparing Various Online Solvers | plotly', name = 'Comparing Various Online Solvers', has_thumbnail='true', thumbnail='thumbnail/sgd-comparision.jpg', language='scikit-learn', page_type='example_index', display_as='linear_models', order=17, ipynb= '~Diksha_Gabha/3220') ```
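Returning to the comparison above: if you also want the numbers behind the figure, the short optional snippet below pulls the test error at the largest training proportion out of the traces built earlier. It only relies on the `data` list and `xx` already defined in this notebook, and on the fact that `go.Scatter` traces can be indexed like dicts; `pandas` is imported here only for a tidy printout.

```
import pandas as pd

# Test error at the largest training proportion (last point of each curve)
final_errors = {trace['name']: trace['y'][-1] for trace in data}
summary = pd.Series(final_errors, name='test error at %.0f%% train' % (100 * xx[-1]))
print(summary.sort_values())
```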
github_jupyter
``` # Useful for debugging %load_ext autoreload %autoreload 2 # Nicer plotting import matplotlib import matplotlib.pyplot as plt %matplotlib inline %config InlineBackend.figure_format = 'retina' matplotlib.rcParams['figure.figsize'] = (8,4) ``` # Disgten example Similar to the simple example, but generating particles with Distgen ``` from distgen import Generator YAML=""" n_particle: 10000 random_type: hammersley start: type: cathode MTE: value: 414 units: meV total_charge: value: 250 units: pC r_dist: n_sigma_cutoff: 1.5 sigma_xy: value: 0.4 units: mm type: radial_gaussian t_dist: type: superposition dists: d1: type: gaussian avg_t: units: ps value: -1 sigma_t: units: ps value: 1 d2: type: gaussian avg_t: units: ps value: 1 sigma_t: units: ps value: 1 """ G = Generator(YAML) # Tune the two dist separation G['t_dist:dists:d1:avg_t:value'] = -1 G['t_dist:dists:d2:avg_t:value'] = 1 G.run() GP = G.particles GP.plot('t') GP.plot('pz') from impact import Impact import matplotlib.pyplot as plt import os ifile = 'templates/lcls_injector/ImpactT.in' os.path.exists(ifile) # Make Impact object I = Impact(ifile, initial_particles = G.particles, verbose=True) # This will use the initial particles I.write_initial_particles(update_header=True) # Change some things I.header['Nx'] = 16 I.header['Ny'] = 16 I.header['Nz'] = 16 I.header['Dt'] = 5e-13 # Turn Space Charge off I.header['Bcurr'] = 0 # Other switches I.timeout = 1000 # Switches for MPI I.use_mpi=True I.header['Nprow'] = 1 I.header['Npcol'] = 4 # Change stop location I.stop = 1.5 #I.ele['stop_1']['s'] = I.ele['OTR2']['s']+.001 I.run() I.input.keys() I.output.keys() I.output['stats'].keys() I.output['slice_info'].keys() ``` # Particles ``` # Particles are automatically parsed in to openpmd-beamphysics ParticleGroup objects I.output['particles'] PI = I.output['particles']['initial_particles'] PF = I.output['particles']['final_particles'] # Original particles GP.plot('t', 'pz') # Readback of initial particles from Impact-T. PI.plot('t', 'pz') # The initial time was shifted to account for this I.header['Tini'] # Get the final particles, calculate some statistic P = I.output['particles']['final_particles'] P['mean_energy'] # Show the units P.units('mean_energy') P.plot('z', 'pz') ``` # Stats ``` # Impact's own calculated statistics can be retieved len(I.stat('norm_emit_x')), I.stat('norm_emit_x')[-1] # Compare these. key1 = 'mean_z' key2 = 'sigma_x' units1 = str(I.units(key1)) units2 = str(I.units(key2)) plt.xlabel(key1+f' ({units1})') plt.ylabel(key2+f' ({units2})') plt.plot(I.stat(key1), I.stat(key2)) plt.scatter( [I.particles[name][key1] for name in I.particles], [I.particles[name][key2] for name in I.particles], color='red') ``` # Archive, and restart from the middle ``` afile = I.archive() I2 = Impact(verbose=False) I2.load_archive(afile) # Patch in these particles I2.initial_particles = I2.particles['YAG02'] # Turn off cathode start I2.header['Flagimg'] = 0 I2.configure() # Run again I2.use_mpi=True I2.run() # Compare these. 
key1 = 'mean_z' key2 = 'sigma_x' units1 = str(I.units(key1)) units2 = str(I.units(key2)) plt.xlabel(key1+f' ({units1})') plt.ylabel(key2+f' ({units2})') plt.plot(I.stat(key1), I.stat(key2), color='black', label='original run') plt.plot(I2.stat(key1), I2.stat(key2), color='red', label='restart run') plt.scatter( [I.particles[name][key1] for name in I.particles], [I.particles[name][key2] for name in I.particles], color='black') plt.scatter( [I2.particles[name][key1] for name in I2.particles], [I2.particles[name][key2] for name in I2.particles], color='red', marker='x') plt.legend() # Cleanup os.remove(afile) ```
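Beyond the overlay plot, it can be handy to compare the original and restarted runs numerically. The short sketch below only uses objects that already appear in this notebook (`I`, `I2`, and their `final_particles`); the particular statistics chosen are just examples and can be swapped for any other ParticleGroup keys.

```
# Numerical comparison of the original and restarted runs at the final particles
P1 = I.output['particles']['final_particles']
P2 = I2.output['particles']['final_particles']

for key in ['mean_energy', 'sigma_x', 'norm_emit_x']:
    print(f"{key}: original = {P1[key]:.6g}, restart = {P2[key]:.6g}")
```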
github_jupyter
``` #https://pytorch.org/tutorials/beginner/pytorch_with_examples.html ``` # MNIST Dataset ### http://yann.lecun.com/exdb/mnist/ ### The MNIST database of handwritten digits, available from this page, has a training set of 60,000 examples, and a test set of 10,000 examples. It is a subset of a larger set available from NIST. The digits have been size-normalized and centered in a fixed-size image. ``` import matplotlib.pyplot as plt import h5py #pip install h5py -- https://www.h5py.org/ #load train f = h5py.File('MNISTdata.hdf5', 'r') train_x, train_y = f['x_train'][:], f['y_train'][:,0] f.close() print("train_x", train_x.shape, train_x.dtype) #each image is stored in 784*1 numpy.ndarray, basically 28*28 image type(train_x) plt.imshow(train_x[0].reshape(28, 28)), train_y[0] import torch import torchvision import torchvision.transforms as transforms import torch.nn as nn import torch.utils.data import torch.optim as optim import torch.backends.cudnn as cudnn import numpy as np import os import os.path import argparse from torch.autograd import Variable class FNN(nn.Module):#Fully connected Neural Network """FNN.""" def __init__(self): """FNN Builder.""" super(FNN, self).__init__() self.fc_layer = nn.Sequential( nn.Linear(784, 100),#100 is the number of hidden nodes in the hidden layer nn.ReLU(inplace=True), nn.Linear(100, 10) ) #self.layer1 = nn.Linear(784, 100) #self.layer2 = nn.ReLU(inplace=True) #self.layer3 = nn.Linear(100, 10) def forward(self, x): """Perform forward.""" x = self.fc_layer(x) return x #x = self.layer1(x) #x = self.layer2(x) #x = self.layer3(x) #y = self.fc_layer(x) #return y # 784*100 + 100*10 - NN # 784 def calculate_accuracy(loader, is_gpu): """Calculate accuracy. Args: loader (torch.utils.data.DataLoader): training / test set loader is_gpu (bool): whether to run on GPU Returns: tuple: (overall accuracy, class level accuracy) """ correct = 0 total = 0 for data in loader: inputs, labels = data if is_gpu: inputs = inputs.cuda() labels = labels.cuda() inputs, labels = Variable(inputs), Variable(labels) outputs = net(inputs) _, predicted = torch.max(outputs.data, 1) # forward + backward + optimize outputs = net(inputs)#forward total += labels.size(0) #correct += (predicted == labels).sum() correct += (predicted == labels[:,0].T).sum() return 100*correct.item()/float(total) parser = argparse.ArgumentParser() # hyperparameters settings parser.add_argument('--lr', type=float, default=0.001, help='learning rate') parser.add_argument('--wd', type=float, default=5e-4, help='weight decay')#lr/(c+wd) parser.add_argument('--epochs', type=int, default=50, help='number of epochs to train') parser.add_argument('--batch_size_train', type=int, default=16, help='training set input batch size') parser.add_argument('--batch_size_test', type=int, default=16, help='test set input batch size') parser.add_argument('--is_gpu', type=bool, default=False, help='whether training using GPU') import sys sys.argv=[''] del sys # parse the arguments opt = parser.parse_args() f = h5py.File('MNISTdata.hdf5','r') x_test_set=np.float32(f['x_test'][:]) y_test_set=np.int32(np.array(f['y_test'][:,0])).reshape(-1,1) x_train_set=np.float32(f['x_train'][:]) y_train_set=np.int32(np.array(f['y_train'][:,0])).reshape(-1,1) f.close() #num_samples = y_train_set.shape[0] #y_train_set = y_train_set.reshape(1, num_samples) #y_train_set = np.eye(10)[y_train_set.astype('int32')] #y_train_set = y_train_set.T.reshape(10, num_samples) #num_samples = y_test_set.shape[0] #y_test_set = y_test_set.reshape(1, num_samples) 
#y_test_set = np.eye(10)[y_test_set.astype('int32')] #y_test_set = y_test_set.T.reshape(10, num_samples) trainset = torch.utils.data.TensorDataset(torch.Tensor(x_train_set), torch.Tensor(y_train_set)) # create your datset trainloader = torch.utils.data.DataLoader( trainset, batch_size=opt.batch_size_train, shuffle=True) #mini-batch gradient, stochastic gradient descent - 1 sample testset = torch.utils.data.TensorDataset(torch.Tensor(x_test_set), torch.Tensor(y_test_set)) # create your datset testloader = torch.utils.data.DataLoader( testset, batch_size=opt.batch_size_test, shuffle=False) type(trainset), type(trainloader) # create the FNN instance net = FNN() # For training on GPU, transfer net and data into the GPU if opt.is_gpu: net = net.cuda() net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count())) cudnn.benchmark = True else: print('Training on CPU') # Loss function and optimizer criterion = nn.CrossEntropyLoss()#N dim -> prob (softmax) -> CrossEntropyLoss() optimizer = optim.Adam(net.parameters(), lr=opt.lr, weight_decay=opt.wd)#a variant of SGD for epoch in range(opt.epochs): running_loss = 0.0 for i, data in enumerate(trainloader, 0): # get the inputs inputs, labels = data #if training on GPU, wrap the data into the cuda if opt.is_gpu: inputs = inputs.cuda() labels = labels.cuda() # wrap them in Variable inputs, labels = Variable(inputs), Variable(labels) # zero the parameter gradients optimizer.zero_grad() # forward + backward + optimize outputs = net(inputs)#forward loss = criterion(outputs, labels[:, 0].long()) loss.backward()#compute gradients optimizer.step()#descent # calculate loss running_loss += loss.data.item() # Normalizing the loss by the total number of train batches running_loss /= len(trainloader) # Calculate training/test set accuracy of the existing model train_accuracy = calculate_accuracy(trainloader, opt.is_gpu) test_accuracy = calculate_accuracy(testloader, opt.is_gpu) print("Iteration: {0} | Loss: {1} | Training accuracy: {2}% | Test accuracy: {3}%".format( epoch+1, running_loss, train_accuracy, test_accuracy)) loss, loss.requires_grad outputs labels[:, 0].long() ``` # Without Pytorch ``` import h5py import numpy as np import argparse def sigmoid(x): """ define scale function """ return np.exp(x)/(1.0+np.exp(x)) def RELU(x): return np.np.maximum(x,0) def reluDerivative(x): return np.array([reluDerivativeSingleElement(xi) for xi in x]) def reluDerivativeSingleElement(xi): if xi > 0: return 1 elif xi <= 0: return 0 def compute_loss(Y,V): L_sum = np.sum(np.multiply(Y, np.log(V))) m = Y.shape[1] L = -(1./m) * L_sum return L def feed_forward(X, params): tempt={} tempt["Z"]=np.matmul(params["W"], X) + params["b1"] tempt["H"]=sigmoid(tempt["Z"]) #tempt["H"]=RELU(tempt["Z"]) tempt["U"]=np.matmul(params["C"], tempt["H"]) + params["b2"] tempt["V"]=np.exp(tempt["U"]) / np.sum(np.exp(tempt["U"]), axis=0) return tempt def back_propagate(X, Y, params, tempt, m_batch): # X is m*n matrix # Y is m*1 matrix # tempt is the value in each neural cell dU=tempt["V"]-Y # the loss of output layer dC=(1. / m_batch) * np.matmul(dU, tempt["H"].T) db2=(1. / m_batch) * np.sum(dU, axis=1, keepdims=True) dH=np.matmul(params["C"].T, dU) dZ = dH * sigmoid(tempt["Z"]) * (1 - sigmoid(tempt["Z"])) #dZ=dH*reluDerivative(tempt["Z"]) dW = (1. / m_batch) * np.matmul(dZ, X.T) db1 = (1. 
/ m_batch) * np.sum(dZ, axis=1, keepdims=True) grads={"dW":dW, "db1":db1, "dC":dC, "db2":db2} return grads #hyperparameters epochs=10 batch_size=1 batchs=np.int32(60000/batch_size) LR=0.01 dh=100#number of hidden nodes #getting 60000 samples of training data and 10000 samples of testing data f=h5py.File('MNISTdata.hdf5','r') x_test_set=np.float32(f['x_test'][:]) y_test_set=np.int32(np.array(f['y_test'][:,0])).reshape(-1,1) x_train_set=np.float32(f['x_train'][:]) y_train_set=np.int32(np.array(f['y_train'][:,0])).reshape(-1,1) f.close() X=np.vstack((x_train_set,x_test_set)) Y=np.vstack((y_train_set,y_test_set)) num_samples=Y.shape[0] Y=Y.reshape(1,num_samples) Y_new = np.eye(10)[Y.astype('int32')] Y_new = Y_new.T.reshape(10, num_samples) X_train, X_test=X[:60000].T, X[60000:].T Y_train, Y_test=Y_new[:,:60000], Y_new[:,60000:] #building fully connected neural network with one hidden layer #initialization of parameters params={"b1":np.zeros((dh,1)), "W":np.random.randn(dh,784)*np.sqrt(1. / 784), "b2":np.zeros((10,1)), "C":np.random.randn(10,dh)*np.sqrt(1. / dh)} #training the network for num_epoches in range(epochs): if (num_epoches > 5): LR = 0.001 if (num_epoches > 10): LR = 0.0001 if (num_epoches > 15): LR = 0.00001 #shuffle the training data shuffle_index=np.random.permutation(X_train.shape[1]) X_train= X_train[:, shuffle_index] Y_train=Y_train[:, shuffle_index] for num_batch in range(batchs): left_index=num_batch*batch_size right_index=min(left_index+batch_size,x_train_set.shape[0]-1) m_batch=right_index-left_index X=X_train[:,left_index:right_index] Y=Y_train[:,left_index:right_index] tempt=feed_forward(X, params) grads = back_propagate(X, Y, params, tempt, 1) #gradient descent params["W"] = params["W"] - LR * grads["dW"] params["b1"] = params["b1"] - LR * grads["db1"] params["C"] = params["C"] - LR * grads["dC"] params["b2"] = params["b2"] - LR * grads["db2"] #compute loss on training data tempt = feed_forward(X_train, params) train_loss = compute_loss(Y_train, tempt["V"]) #compute loss on test set tempt=feed_forward(X_test, params) test_loss = compute_loss(Y_test, tempt["V"]) total_correct=0 for n in range(Y_test.shape[1]): p = tempt["V"][:,n] prediction = np.argmax(p) if prediction == np.argmax(Y_test[:,n]): total_correct+=1 accuracy = np.float32(total_correct) / (Y_test.shape[1]) #print(params) print("Epoch {}: training loss = {}, test loss = {}, accuracy={}".format( num_epoches + 1, train_loss, test_loss, accuracy)) ``` # ML Model with JD Data ``` import pandas as pd import matplotlib.pyplot as plt import numpy as np import statsmodels.api as sm from scipy import stats #read/write data from/to local files prefix_path = 'JD_data/' # 'skus' table skus = pd.read_csv(prefix_path + 'JD_sku_data.csv') # 'users' table users = pd.read_csv(prefix_path + 'JD_user_data.csv') # 'clicks' table clicks = pd.read_csv(prefix_path + 'JD_click_data.csv') # 'orders' table orders = pd.read_csv(prefix_path + 'JD_order_data.csv') # 'delivery' table delivery = pd.read_csv(prefix_path + 'JD_delivery_data.csv') # 'inventory' table inventory = pd.read_csv(prefix_path + 'JD_inventory_data.csv') # 'network' table network = pd.read_csv(prefix_path + 'JD_network_data.csv') orders['order_date'] = pd.to_datetime(orders['order_date']) orders['weekday'] = orders['order_date'].dt.dayofweek df_temp = orders[['weekday','final_unit_price']] #Add dummy variables df_temp1 = pd.get_dummies(df_temp['weekday'], prefix='weekday') cols_to_keep = ['final_unit_price'] df_temp = df_temp[cols_to_keep].join(df_temp1.iloc[:,0:])#not 
df_temp1.ix[:,0:], consider the gender case df_temp['intercept'] = 1 train_cols_ = df_temp.columns[1:]#can write ['x1', 'x2'] manually train_df = df_temp[train_cols_] opt2 = parser.parse_args() trainset_JD = torch.utils.data.TensorDataset(torch.Tensor(train_df.values), torch.Tensor(df_temp['final_unit_price'].values)) # create your datset trainloader_JD = torch.utils.data.DataLoader( trainset_JD, batch_size=opt2.batch_size_train, shuffle=True) class FNN_JD(nn.Module): """FNN.""" def __init__(self): """FNN Builder.""" super(FNN_JD, self).__init__() self.fc_layer = nn.Sequential( nn.Linear(8, 4), nn.ReLU(inplace=True), nn.Linear(4, 1) ) #self.fc_layer = nn.Sequential( # nn.Linear(8, 4), # nn.ReLU(inplace=True), # nn.Linear(4, 2), # nn.ReLU(inplace=True), # nn.Linear(2, 1) #) def forward(self, x): """Perform forward.""" x = self.fc_layer(x) return x # create the FNN instance net_JD = FNN_JD() # For training on GPU, transfer net and data into the GPU if opt2.is_gpu: net_JD = net.cuda() net_JD = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count())) cudnn.benchmark = True else: print('Training on CPU') # Loss function and optimizer criterion_JD = nn.MSELoss() optimizer_JD = optim.Adam(net_JD.parameters(), lr=opt2.lr, weight_decay=opt2.wd) train_df for epoch in range(opt2.epochs): running_loss = 0.0 for i, data in enumerate(trainloader_JD, 0): # get the inputs inputs, prices = data #if training on GPU, wrap the data into the cuda if opt2.is_gpu: inputs = inputs.cuda() prices = prices.cuda() # wrap them in Variable inputs, prices = Variable(inputs), Variable(prices) # zero the parameter gradients optimizer_JD.zero_grad() # forward + backward + optimize outputs = net_JD(inputs) loss = criterion_JD(outputs[:,0], prices) loss.backward() optimizer_JD.step() # calculate loss running_loss += loss.data.item() # Normalizing the loss by the total number of train batches #running_loss /= len(trainloader) # Calculate training/test set accuracy of the existing model #train_accuracy = calculate_accuracy(trainloader, opt.is_gpu) print("Iteration: {0} | Loss: {1}".format( epoch+1, running_loss)) #sum of squared error opt2.batch_size_train * 197859128 ``` ## Ways to improve accuracy: ### 1. hyperparameter tuning: different algorithm and learning rate - SGD, different loss function, batch size ### 2. different network structures, different activiation layer ### 3. more features/inputs # Compare with Linear Regression ``` import statsmodels.api as sm df_temp = orders[['weekday','final_unit_price']] #Add dummy variables df_temp1 = pd.get_dummies(df_temp['weekday'], prefix='weekday') cols_to_keep = ['final_unit_price'] df_temp = df_temp[cols_to_keep].join(df_temp1.iloc[:,1:])#not df_temp1.ix[:,0:], consider the gender case df_temp['intercept'] = 1 train_cols_ = df_temp.columns[1:]#can write ['x1', 'x2'] manually train_df = df_temp[train_cols_] linear_model = sm.OLS(df_temp['final_unit_price'], train_df) res = linear_model.fit() print(res.summary()) res.params coef = res.params.values x = train_df.values y = df_temp['final_unit_price'] loss = 0 for i in range(len(y)): predict = np.dot(coef, x[i]) loss += (predict - y[i])**2 loss # 8*4 + 4*1 # 7 ```
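One small optional addition: the sum of squared errors for the OLS fit can also be computed without the Python loop by vectorizing the prediction step with NumPy. A sketch, reusing the `coef`, `x`, and `y` variables already defined above:

```
# Vectorized sum of squared errors for the OLS fit: predict all rows at once,
# then sum the squared residuals (same quantity as the loop above).
predictions = x @ coef
sse = np.sum((y.values - predictions) ** 2)
sse
```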
github_jupyter
# List, Set, and Dictionary Comprehensions In our prior session we discussed a variety of loop patterns. One of the most common patterns that we encounter in practice is the need to iterate through a list of values, transform the elements of the list using some operations, filter out the results, and return back a new list of values. ## Example Let's examine again our example with the NBA teams and franchise names: ``` nba_teams = [ "Atlanta Hawks", "Boston Celtics", "Brooklyn Nets", "Charlotte Hornets", "Chicago Bulls", "Cleveland Cavaliers", "Dallas Mavericks", "Denver Nuggets", "Detroit Pistons", "Golden State Warriors", "Houston Rockets", "Indiana Pacers", "LA Clippers", "Los Angeles Lakers", "Memphis Grizzlies", "Miami Heat", "Milwaukee Bucks", "Minnesota Timberwolves", "New Orleans Pelicans", "New York Knicks", "Oklahoma City Thunder", "Orlando Magic", "Philadelphia 76ers", "Phoenix Suns", "Portland Trail Blazers", "Sacramento Kings", "San Antonio Spurs", "Toronto Raptors", "Utah Jazz", "Washington Wizards" ] print("The list contains", len(nba_teams), "teams") franchise_names = [] # We create an empty list for team in nba_teams: # We iterate over all elements of the list # Do some operation on the list element "team" # and get back the result "franchise" franchise = team.split()[-1] # Append the "franchise" element in the list that we created before the loop franchise_names.append(franchise) ``` And below we re-write the code above as a **list comprehension**. ``` franchise_names = [ team.split()[-1] for team in nba_teams ] ``` In other words, list comprehensions give us the ability to write a very common loop pattern as a one-liner. However, it is not just about brevity; when we see code that uses a list comprehension we understand quickly that the code is processing one list to create another, and the various elements are together in a very specific order. Such a clarity is not guaranteed with a loop, as loops may have many uses. ## Defining List Comprehensions The syntax of list comprehensions is based on the way mathematicians define sets and lists, a syntax that leaves it clear what the contents should be. For example `S` is a set of the square of all integer numbers from 0 to 9. In math notation, we write: + `S = {x² : x in {0 ... 9}}` Python's list comprehensions give a very natural way to write statements just like these. It may look strange early on, but it becomes a very natural and concise way of creating lists, without having to write for-loops. Let's see again the comparison with for loops: ``` # This code below will create a list with the squares # of the numbers from 0 to 9 S = [] # we create an empty list for i in range(10): # We iterate over all numbers from 0 to 9 S.append(i*i) # We add in the list the square of the number i print(S )# we print(the list) S = [i*i for i in range(10)] print(S) ``` Let's do one more example. The `V` is the powers of 2 from $2^0$ until $2^{12}$: + `V = (1, 2, 4, 8, ..., 2¹²)` ``` V=[] # Create a list for i in range(13): # Change i to be from 0 to 12 V.append(2**i) # Add 2**i in the new list print(V) # And rewritten as a list comprehension: V = [2**i for i in range(13)] print(V) ``` Again notice the structure: ```python newlist = [] for i in somelist: x = do_something_with(i) newlist.append(x) ``` gets rewritten as ```python newlist = [do_something_with(i) for i in somelist] ``` ## The *if* statement within a list comprehension Now let's consider the following case. 
We want to process the list of NBA teams, and keep in a list the teams that have a franchise name that contains a given substring. In the example below, we will try to find all the teams that start with the letter `B`. ``` nba_teams = [ "Atlanta Hawks", "Boston Celtics", "Brooklyn Nets", "Charlotte Hornets", "Chicago Bulls", "Cleveland Cavaliers", "Dallas Mavericks", "Denver Nuggets", "Detroit Pistons", "Golden State Warriors", "Houston Rockets", "Indiana Pacers", "LA Clippers", "Los Angeles Lakers", "Memphis Grizzlies", "Miami Heat", "Milwaukee Bucks", "Minnesota Timberwolves", "New Orleans Pelicans", "New York Knicks", "Oklahoma City Thunder", "Orlando Magic", "Philadelphia 76ers", "Phoenix Suns", "Portland Trail Blazers", "Sacramento Kings", "San Antonio Spurs", "Toronto Raptors", "Utah Jazz", "Washington Wizards" ] franchise_names = [] look_for = 'B' #looking for team in nba_teams: franchise = team.split()[-1] if franchise.startswith(look_for): franchise_names.append(franchise) print(franchise_names) ``` This pattern, where we do not add *all* the elements in the resulting list is also very common. List comprehensions allow such patterns to be also expressed as list comprehensions ``` look_for = 'B' franchise_names = [team.split()[-1] for team in nba_teams if team.split()[-1].startswith(look_for)] print(franchise_names) # Alternatively, you can even break the lines within a comprehension # This may help with readability franchise_names = [team.split()[-1] for team in nba_teams if team.split()[-1].startswith(look_for)] print(franchise_names) ``` Here is another example, with a list comprehension. We have `S` is a set of the square of all integer numbers from 0 to 9, and we define `M` to be all the elements in `S` that are even. In math notation: + `S = {x² : x in {0 ... 9}}` + `M = {x | x in S and x even}` Now let's write the above as list comprehensions. **Note the list comprehension for deriving M uses a "if statement" to filter out those values that aren't of interest**, restricting to only the even squares. ``` S = [i*i for i in range(10)] print(S) M = [] for i in S: # iterate through all elements in S if i%2 == 0: # if i is an event number M.append(i) # ..add it to the list print(M) M = [x for x in S if x%2 == 0] print(M) ``` These are simple examples, using numerical compuation. Let's see a more "practical" use: In the following operation we transform a string into an list of values, a more complex operation: ``` sentence = 'The quick brown fox jumps over the lazy dog' words = [(w.upper(), w.lower(), len(w)) for w in sentence.split()] words ``` So, what the code does here? It takes as input the string `sentence`, creates a list of words, and for each word it creates a tuple, with the word in uppercase, lowercase, together with the length of the word. ## Set and Dictionary Comprehensions In addition to _list_ comprehensions, we also have the same principle for sets and dictionaries. We can create sets and dictionaries in the same way, but now we do not use square brackets to surround the comprehension, but use braces instead. ``` # Creating a set instead of a list. 
S = {i*i for i in range(10)} S # Dictionary comprehension, where team name becomes the key, and franchise name the value teams_franchise = {team:team.split()[-1] for team in nba_teams} teams_franchise # Dictionary comprehension, where team name becomes the key, and franchise name the value words = {w:len(w) for w in sentence.split()} words ``` ## Exercise You are given the sentence 'The quick brown fox jumps over the lazy dog', ``` sentence = 'The quick brown fox jumps over the lazy dog' ``` **Question 1**: List each word and its length from the string 'The quick brown fox jumps over the lazy dog', conditioned on the length of the word being four characters and above **Question 2**: List only words with the letter o in them ``` # List each word and its length from the string # 'The quick brown fox jumps over the lazy dog', # conditioned on the length of the word being four characters and above ``` ### Solution ``` [ (word, len(word)) for word in sentence.split() if len(word)>=4] # List only words with the letter o in them ``` ### Solution ``` [ word for word in sentence.split() if 'o' in word] ``` ## Exercise We will work now on a more challenging exercise. This will not only require the use of comprehensions, but will also ask you to put together things that we learned earlier in the course, especially when we studied strings. **Question 1**: You are given the `wsj` article below. Write a list comprehension for getting the words that appear more than once. * Use the `.split()` command for splitting, without passing a parameter. * When counting words, case does not matter (i.e., YAHOO is the same as Yahoo). **Question 2**: Find all the *characters* in the article that are not letters or numbers. You can use the isdigit() and isalpha() functions, which work on strings. (e.g, `"Panos".isalpha()` and `"1234".isdigit()` return True) ``` wsj = """ Yahoo Inc. disclosed a massive security breach by a “state-sponsored actor” affecting at least 500 million users, potentially the largest such data breach on record and the latest hurdle for the beaten-down internet company as it works through the sale of its core business. Yahoo said certain user account information—including names, email addresses, telephone numbers, dates of birth, hashed passwords and, in some cases, encrypted or unencrypted security questions and answers—was stolen from the company’s network in late 2014 by what it believes is a state-sponsored actor. Yahoo said it is notifying potentially affected users and has taken steps to secure their accounts by invalidating unencrypted security questions and answers so they can’t be used to access an account and asking potentially affected users to change their passwords. Yahoo recommended users who haven’t changed their passwords since 2014 do so. It also encouraged users change their passwords as well as security questions and answers for any other accounts on which they use the same or similar information used for their Yahoo account. The company, which is working with law enforcement, said the continuing investigation indicates that stolen information didn't include unprotected passwords, payment-card data or bank account information. With 500 million user accounts affected, this is the largest-ever publicly disclosed data breach, according to Paul Stephens, director of policy and advocacy with Privacy Rights Clearing House, a not-for-profit group that compiles information on data breaches. 
No evidence has been found to suggest the state-sponsored actor is currently in Yahoo’s network, and Yahoo didn’t name the country it suspected was involved. In August, a hacker called “Peace” appeared in online forums, offering to sell 200 million of the company’s usernames and passwords for about $1,900 in total. Peace had previously sold data taken from breaches at Myspace and LinkedIn Corp. """ # getting the words that appear more than once ``` ### Solution ``` words = wsj.lower().split() recurring = [w for w in words if words.count(w)>1] print(recurring) print(sorted(set(recurring))) # Find all the *characters* in the article that are not letters or numbers ``` ### Solution ``` # Let's use a set comprehension here, to eliminate duplicates nonalphanumeric = {c for c in wsj if not c.isdigit() and not c.isalpha()} print(nonalphanumeric) ```
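As an aside (not part of the original exercise), calling `list.count` inside a comprehension rescans the whole word list once per word, which is quadratic. `collections.Counter` builds the counts in a single pass and reproduces the sorted set of recurring words from the solution above:

```
from collections import Counter

# Count every word once, then keep only the words that occur more than once
word_counts = Counter(wsj.lower().split())
recurring = sorted(word for word, count in word_counts.items() if count > 1)
print(recurring)
```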
github_jupyter
# *Electric Circuits I - Week 10*

### Problem 1 (Problem 7.19 - Nilsson)

For the circuit below:

<img src="./figures/J13C1.png" width="400">

a) Determine the voltage $v_0(t)$ across the $48\;mH$ inductor for $t\geq0$.\
b) Determine the current $i_0(t)$ through the $48\;mH$ inductor for $t\geq0$.\
c) Determine the energy dissipated by the $2.5\;k\Omega$ resistor over the interval $0\leq t \leq\infty$.

Link to the circuit simulation: https://tinyurl.com/yj69udn8

```
# inductance values
L1 = 20e-3
L2 = 80e-3
L3 = 48e-3

# initial inductor currents
i1_0 = 5e-3
i2_0 = 5e-3
i3_0 = 0

# equivalent inductance
Leq1 = (L2*L3)/(L2+L3)
Leq = L1 + Leq1

print('Leq = ', Leq/1e-3, ' mH')

R = 2.5e3

# time constant
τ = Leq/R

print('τ = ', τ, ' s')

import sympy as sp

iL_inf = 0
iL_0 = i1_0

# define the time variable
t = sp.symbols('t')

# define i(t)
iL = iL_inf + (iL_0 - iL_inf)*sp.exp(-t/τ)

print('Current through the equivalent inductor:')
print('iL(t) = ', iL/1e-3 , ' mA')

# compute v0
v0 = Leq1*sp.diff(iL,t)

print('v0(t) = ', v0 , ' V')

# inductor currents as a function of the voltage applied across their terminals
i1 = iL
i2 = (1/L2)*sp.integrate(v0, (t, 0, t)) + i2_0
i3 = (1/L3)*sp.integrate(v0, (t, 0, t)) + i3_0

print('Inductor currents:')
print('i1(t) = ', i1/1e-3 , ' mA')
print('i2(t) = ', i2/1e-3 , ' mA')
print('i3(t) = ', i3/1e-3 , ' mA')

# compute the energy stored in each inductor at t = 0
E1_0 = (1/2)*L1*(i1.evalf(subs={t:0}))**2
E2_0 = (1/2)*L2*(i2.evalf(subs={t:0}))**2
E3_0 = (1/2)*L3*(i3.evalf(subs={t:0}))**2

print('Initial energy stored in the inductors:')
print('E1(0) = %.2f μJ' %(E1_0/1e-6))
print('E2(0) = %.2f μJ' %(E2_0/1e-6))
print('E3(0) = %.2f μJ' %(E3_0/1e-6))

# compute the energy stored in each inductor as t -> oo
E1_inf = (1/2)*L1*(i1.evalf(subs={t:100}))**2
E2_inf = (1/2)*L2*(i2.evalf(subs={t:100}))**2
E3_inf = (1/2)*L3*(i3.evalf(subs={t:100}))**2

print('Final energy stored in the inductors:')
print('E1(oo) = %.2f μJ' %(E1_inf/1e-6))
print('E2(oo) = %.2f μJ' %(E2_inf/1e-6))
print('E3(oo) = %.2f μJ' %(E3_inf/1e-6))

# compute the change in energy stored in the inductors
ΔE = (E1_inf-E1_0) + (E2_inf-E2_0) + (E3_inf-E3_0)

print('Change in energy stored in the inductors:')
print('ΔE = %.2f μJ' %(ΔE/1e-6))

# voltage across the resistor, vR(t)
vR = R*i1

# power dissipated by the resistor
p = vR*i1

# energy dissipated by the resistor
E = sp.integrate(p, (t, 0, sp.oo))

print('Energy dissipated by the resistor:')
print('E = %.2f μJ' %(E/1e-6))
```
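As a quick sanity check on the sympy result (this check is an addition, not part of the original solution): for an exponential decay $i_L(t) = I_0 e^{-t/\tau}$ with $\tau = L_{eq}/R$, the dissipated energy has the closed form $\int_0^\infty R\,i_L^2(t)\,dt = \tfrac{1}{2}L_{eq}I_0^2$, which should agree with the integral computed above.

```
# Closed-form check: ∫ R*iL(t)**2 dt from 0 to ∞ equals (1/2)*Leq*iL_0**2
# when iL(t) = iL_0*exp(-t/τ) and τ = Leq/R.
E_check = 0.5*Leq*iL_0**2
print('E (closed form) = %.2f μJ' % (E_check/1e-6))
```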
github_jupyter
``` from __future__ import absolute_import, division, print_function, unicode_literals from IPython import display from matplotlib import pyplot as plt from scipy.ndimage.filters import gaussian_filter1d import pandas as pd import numpy as np import datetime import tensorflow as tf !rm -rf ./logs/ # Load the TensorBoard notebook extension %load_ext tensorboard higgs_path = tf.keras.utils.get_file('HIGGSSmall.csv.gz', 'https://github.com/PacktWorkshops/The-Reinforcement-Learning-Workshop/blob/master/Chapter03/Dataset/HIGGSSmall.csv.gz?raw=true') N_TEST = int(1e3) N_VALIDATION = int(1e3) N_TRAIN = int(1e4) BUFFER_SIZE = int(N_TRAIN) BATCH_SIZE = 500 STEPS_PER_EPOCH = N_TRAIN//BATCH_SIZE N_FEATURES = 28 ds = tf.data.experimental.CsvDataset(higgs_path,[float(),]*(N_FEATURES+1), compression_type="GZIP") def pack_row(*row): label = row[0] features = tf.stack(row[1:],1) return features, label packed_ds = ds.batch(N_TRAIN).map(pack_row).unbatch() validate_ds = packed_ds.take(N_VALIDATION).cache() test_ds = packed_ds.skip(N_VALIDATION).take(N_TEST).cache() train_ds = packed_ds.skip(N_VALIDATION+N_TEST).take(N_TRAIN).cache() test_ds = test_ds.batch(BATCH_SIZE) validate_ds = validate_ds.batch(BATCH_SIZE) train_ds = train_ds.shuffle(BUFFER_SIZE).repeat().batch(BATCH_SIZE) lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay( 0.001, decay_steps=STEPS_PER_EPOCH*1000, decay_rate=1, staircase=False) log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") def compile_and_fit(model, name, max_epochs=3000): optimizer = tf.keras.optimizers.Adam(lr_schedule) model.compile(optimizer=optimizer, loss=tf.keras.losses.BinaryCrossentropy(from_logits=True), metrics=[ tf.keras.losses.BinaryCrossentropy( from_logits=True, name='binary_crossentropy'), 'accuracy']) model.summary() tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1, profile_batch=0) history = model.fit(train_ds, steps_per_epoch = STEPS_PER_EPOCH, epochs=max_epochs, validation_data=validate_ds, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_binary_crossentropy', patience=200), tensorboard_callback], verbose=2) return history regularization_model = tf.keras.Sequential([ tf.keras.layers.Dense(512, kernel_regularizer=tf.keras.regularizers.l2(0.0001), activation='elu', input_shape=(N_FEATURES,)), tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(512, kernel_regularizer=tf.keras.regularizers.l2(0.0001), activation='elu'), tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(512, kernel_regularizer=tf.keras.regularizers.l2(0.0001), activation='elu'), tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(512, kernel_regularizer=tf.keras.regularizers.l2(0.0001), activation='elu'), tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(1) ]) compile_and_fit(regularization_model, "regularizers/regularization", max_epochs=9000) test_accuracy = tf.keras.metrics.Accuracy() for (features, labels) in test_ds: logits = regularization_model(features) probabilities = tf.keras.activations.sigmoid(logits) predictions = 1*(probabilities.numpy() > 0.5) test_accuracy(predictions, labels) regularization_model_accuracy = test_accuracy.result() print("Test set accuracy: {:.3%}".format(regularization_model_accuracy)) %tensorboard --logdir logs/fit ```
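One optional addition for inspecting the setup above: `InverseTimeDecay`, like other `LearningRateSchedule` objects, can be called with a step number, so the decay curve can be plotted directly. A small sketch using the `lr_schedule` and `STEPS_PER_EPOCH` already defined:

```
# Plot the learning-rate schedule as a function of training epoch.
step = np.linspace(0, 100000)
lr = lr_schedule(step)
plt.figure(figsize=(8, 6))
plt.plot(step / STEPS_PER_EPOCH, lr)
plt.ylim([0, max(plt.ylim())])
plt.xlabel('Epoch')
plt.ylabel('Learning Rate')
```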
github_jupyter
<h1 align=center>The Cobweb Model</h1> Presentation follows <a href="http://www.parisschoolofeconomics.eu/docs/guesnerie-roger/hommes94.pdf">Hommes, <em>JEBO 1994</em></a>. Let $p_t$ denote the <em>observed price</em> of goods and $p_t^e$ the <em>expected price</em> of goods in period $t$. Similarly, let $q_t^d$ denote the <em>quantity demanded</em> of all goods in period $t$ and $q_t^s$ the <em>quantity supplied</em> of all goods in period $t$. \begin{align} q_t^d =& D(p_t) \tag{1} \\ q_t^s =& S(p_t^e) \tag{2} \\ q_t^d =& q_t^s \tag{3} \\ p_t^e =& p_{t-1}^e + w\big(p_{t-1} - p_{t-1}^e\big) = (1 - w)p_{t-1}^e + w p_{t-1} \tag{4} \end{align} Equation 1 says that the quantity demanded of goods in period $t$ is some function of the <em>observed price</em> in period $t$. Equation 2, meanwhile, states that the quantity of goods supplied in period $t$ is a function of the <em>expected price</em> in period $t$. Equation 3 is a market clearing equilibrium condition. Finally, equation 4 is an adaptive expectation formation rule that specifies how goods producers form their expectations about the price of goods in period $t$ as a function of past prices. Combine the equations as follows. Note that equation 3 implies that... $$ D(p_t) = q_t^d = q_t^s = S(p_t^e) $$ ...and therefore, assuming the demand function $D$ is invertible, we can write the observed price of goods in period $t$ as... $$ p_t = D^{-1}\big(S(p_t^e)\big). \tag{5}$$ Substituting equation 5 into equation 4 we arrive at the following difference equation $$ p_{t+1}^e = w D^{-1}\big(S(p_t^e)\big) + (1 - w)p_t^e. \tag{7}$$ ``` %matplotlib inline %load_ext autoreload %autoreload 2 import functools import ipywidgets import matplotlib.pyplot as plt import numpy as np from scipy import optimize import seaborn as sns import cobweb def observed_price(D_inverse, S, expected_price, **params): """The observed price of goods in a particular period.""" actual_price = D_inverse(S(expected_price, **params), **params) return actual_price def adaptive_expectations(D_inverse, S, expected_price, w, **params): """An adaptive expectations price forecasting rule.""" actual_price = observed_price(D_inverse, S, expected_price, **params) price_forecast = w * actual_price + (1 - w) * expected_price return price_forecast ``` <h2> Non-linear supply functions </h2> When thinking about supply it helps to start with the following considerations... <ol> <li> ...when prices are low, the quantity supplied increases slowly because of fixed costs of production (think startup costs, etc). <li> ...when prices are high, supply also increases slowly because of capacity constraints. </ol> These considerations motivate our focus on "S-shaped" supply functions... $$ S_{\gamma}(p_t^e) = -tan^{-1}(-\gamma \bar{p}) + tan^{-1}(\gamma (p_t^e - \bar{p})). \tag{10}$$ The parameter $0 < \gamma < \infty$ controls the "steepness" of the supply function. ``` def quantity_supply(expected_price, gamma, p_bar, **params): """The quantity of goods supplied in period t given the epxected price.""" return -np.arctan(-gamma * p_bar) + np.arctan(gamma * (expected_price - p_bar)) ``` <h3> Exploring supply shocks </h3> Interactively change the value of $\gamma$ to see the impact on the shape of the supply function. ``` ipywidgets.interact? 
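# Note: the trailing '?' on the line above is IPython help syntax; running it displays the docstring for ipywidgets.interact.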
interactive_quantity_supply_plot = ipywidgets.interact(cobweb.quantity_supply_plot, S=ipywidgets.fixed(quantity_supply), gamma=cobweb.gamma_float_slider, p_bar=cobweb.p_bar_float_slider) ``` <h2> Special case: Linear demand functions </h2> Suppose the the quantity demanded of goods is a simple, decresing linear function of the observed price. $$ q_t^d = D(p_t) = a - b p_t \implies p_t = D^{-1}(q_t^d) = \frac{a}{b} - \frac{1}{b}q_t^d \tag{11} $$ ...where $-\infty < a < \infty$ and $0 < b < \infty$. ``` def quantity_demand(observed_price, a, b): """The quantity demand of goods in period t given the price.""" quantity = a - b * observed_price return quantity def inverse_demand(quantity_demand, a, b, **params): """The price of goods in period t given the quantity demanded.""" price = (a / b) - (1 / b) * quantity_demand return price ``` <h3> Exploring demand shocks </h3> Interactively change the values of $a$ and $b$ to get a feel for how they impact demand. Shocks to $a$ shift the entire demand curve; shocks to $b$ change the slope of the demand curve (higher $b$ implies greater sensitivity to price; lower $b$ implies less sensitivity to price). ``` interactive_quantity_demand_plot = ipywidgets.interact(cobweb.quantity_demand_plot, D=ipywidgets.fixed(quantity_demand), a=cobweb.a_float_slider, b=cobweb.b_float_slider) ``` <h2> Supply and demand </h2> Market clearing equilibrium price, $p^*$, satisfies... $$ D(p_t) = S(p_t^e). $$ Really this is also an equilibrium in beliefs because we also require that $p_t = p_t^e$! ``` interactive_supply_demand_plot = ipywidgets.interact(cobweb.supply_demand_plot, D=ipywidgets.fixed(quantity_demand), S=ipywidgets.fixed(quantity_supply), a=cobweb.a_float_slider, b=cobweb.b_float_slider, gamma=cobweb.gamma_float_slider, p_bar=cobweb.p_bar_float_slider) ``` <h2> Analyzing dynamics of the model via simulation... </h2> Model has no closed form solution (i.e., we can not solve for a function that describes $p_t^e$ as a function of time and model parameters). BUT, we can simulate equation 7 above to better understand the dynamics of the model... We can simulate our model and plot time series for different parameter values. Questions for discussion... <ol> <li> Can you find a two-cycle? What does this mean?</li> <li> Can you find higher cycles? Perhaps a four-cycle? Maybe even a three-cycle?</li> <li> Do simulations with similar initial conditions converge or diverge over time? </li> </ol> Can we relate these things to other SFI MOOCS on non-linear dynamics and chaos? Surely yes! ``` model = functools.partial(adaptive_expectations, inverse_demand, quantity_supply) interactive_time_series_plot = ipywidgets.interact(cobweb.time_series_plot, F=ipywidgets.fixed(model), X0=cobweb.initial_expected_price_slider, T=cobweb.T_int_slider, a=cobweb.a_float_slider, b=cobweb.b_float_slider, w=cobweb.w_float_slider, gamma=cobweb.gamma_float_slider, p_bar=cobweb.p_bar_float_slider) ``` <h2> Forecast errors </h2> How do we measure forecast error? What does the distribution of forecast errors look like for different parameters? Could an agent learn to avoid chaos? Specifically, suppose an agent learned to tune the value of $w$ in order to minimize its mean forecast error. Would this eliminate chaotic dynamics? 
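Before the interactive version in the next cell, here is a minimal, non-interactive pass over equation 7 that also records the one-step forecast errors $p_t - p_t^e$. It reuses the `observed_price`, `adaptive_expectations`, `inverse_demand`, and `quantity_supply` functions defined above; the parameter values and the initial expected price are illustrative assumptions, not calibrated values.

```
# Iterate equation 7 directly and record expected prices, observed prices,
# and forecast errors. Parameter values here are arbitrary illustrations.
params = {'a': 2.5, 'b': 0.5, 'gamma': 4.0, 'p_bar': 2.0}
w = 0.5
T = 50

expected = np.empty(T)
observed = np.empty(T)
expected[0] = 1.5  # arbitrary initial expected price

for i in range(T):
    observed[i] = observed_price(inverse_demand, quantity_supply, expected[i], **params)
    if i < T - 1:
        expected[i + 1] = adaptive_expectations(inverse_demand, quantity_supply,
                                                expected[i], w, **params)

forecast_errors = observed - expected

fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(12, 4))
ax1.plot(expected, label='expected price $p^e_t$')
ax1.plot(observed, label='observed price $p_t$')
ax1.set_xlabel('t')
ax1.legend()
ax2.hist(forecast_errors, bins=20)
ax2.set_xlabel('forecast error $p_t - p^e_t$');
```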
```
interactive_forecast_error_plot = ipywidgets.interact(cobweb.forecast_error_plot,
                                                      D_inverse=ipywidgets.fixed(inverse_demand),
                                                      S=ipywidgets.fixed(quantity_supply),
                                                      F=ipywidgets.fixed(model),
                                                      X0=cobweb.initial_expected_price_slider,
                                                      T=cobweb.T_int_slider,
                                                      a=cobweb.a_float_slider,
                                                      b=cobweb.b_float_slider,
                                                      w=cobweb.w_float_slider,
                                                      gamma=cobweb.gamma_float_slider,
                                                      p_bar=cobweb.p_bar_float_slider)
```

<h2> Other things of possible interest? </h2>

Impulse response functions? Compare and contrast model predictions under rational expectations, naive expectations, and adaptive expectations. Depending on what Cars might have in mind, we could also add other expectation formation rules from his more recent work and have students analyze those...
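As a starting point for that comparison (an optional sketch, using the `scipy.optimize` import at the top of the notebook): under rational expectations the benchmark prediction is the market-clearing price $p^*$ satisfying $D(p^*) = S(p^*)$, while naive expectations are just the $w = 1$ special case of equation 4. The parameter values below are the same illustrative assumptions used in the earlier sketch, and the bracketing interval for the root finder is an assumption as well.

```
# Rational expectations benchmark: solve D(p*) = S(p*) for the fixed point p*.
# Naive expectations correspond to w = 1 in equation 4.
a, b, gamma, p_bar = 2.5, 0.5, 4.0, 2.0
excess_demand = lambda p: quantity_demand(p, a, b) - quantity_supply(p, gamma, p_bar)
p_star = optimize.brentq(excess_demand, 0.0, 10.0)
p_star
```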
github_jupyter
# Band Ratios Conflations This notebook steps through how band ratio measures are underdetermined. By 'underdetermined', we mean that the same value, or same change in value between measures, can arise from different underlying causes. This shows that band ratios are a non-specific measure. As an example case, we use the theta-beta ratio. ``` %matplotlib inline import numpy as np import matplotlib.pyplot as plt from fooof import FOOOF from fooof.sim import gen_power_spectrum from fooof.plts.spectra import (plot_spectrum, plot_spectra, plot_spectrum_shading, plot_spectra_shading) # Import custom project code import sys sys.path.append('../bratios') from ratios import calc_band_ratio from paths import FIGS_PATHS as fp # Settings SAVE_FIG = False PLOT_TITLES = True # Whether to plot titles on each axis # Plot settings shade_color = '#0365C0' # Band Settings theta_band = [4, 8] beta_band = [20, 30] # Set up index helpers cf_ind = 0 pw_ind = 1 bw_ind = 2 # Simulated power spectra settings freq_range = [1, 35] freq_res = 0.1 nlv = 0 # Define default aperiodic values ap_def = [0, 1] # Define default periodic values theta_def = [6, 0.4, 1] alpha_def = [10, 0.5, 0.75] beta_def = [25, 0.3, 1.5] ``` ## Comparing Band Ratio Values First, let's consider a hypothetical investigation comparing band ratio measures between two groups. The typical interpretation of finding a difference between measured band ratios would be that there is a difference in the relative powers of the oscillation bands used in the calculation of the band ratio. That is to say, the change in ratio could come from a change in two things (the power of the low band, and/or the power of the high band). Here, we will show that there are actually many more ways in which one could measure this difference. A numerically identically change in theta / beta ratio can be obtained from: #### Periodic Changes - a change in theta power - a change in theta bandwidth - a change in beta center frequency - a change in beta power - a change in beta bandwidth #### Aperiodic Changes - a change in aperiodic exponent - with or without oscillations present Note that the specific values in the simulations below have been tuned to create numerically identical changes in measured band ratio. 
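The `calc_band_ratio` helper comes from the project's `bratios` module. Conceptually, a band ratio is just the average power in one frequency band divided by the average power in another. The sketch below is a simplified stand-in for readers without the project code (the project implementation may differ in its details); the analyses in this notebook keep using the imported helper.

```
# Conceptual band-ratio computation: mean power in the low band divided by
# mean power in the high band (a simplified stand-in for calc_band_ratio).
def simple_band_ratio(freqs, powers, low_band, high_band):
    low_mask = (freqs >= low_band[0]) & (freqs <= low_band[1])
    high_mask = (freqs >= high_band[0]) & (freqs <= high_band[1])
    return np.mean(powers[low_mask]) / np.mean(powers[high_mask])
```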
``` # Create a baseline PSD, with oscillations, to compare to freqs, ps_base = gen_power_spectrum(freq_range, ap_def, [theta_def, alpha_def, beta_def], nlv, freq_res) ``` ### Periodic Changes ``` ## CF # Change in center frequency - high band beta_cf = beta_def.copy(); beta_cf[cf_ind] = 19.388 freqs, ps_be_cf = gen_power_spectrum(freq_range, ap_def, [theta_def, alpha_def, beta_cf], nlv, freq_res) ## PW # Changes in oscillation power - low band theta_pw = theta_def.copy(); theta_pw[pw_ind] = 0.5041 freqs, ps_th_pw = gen_power_spectrum(freq_range, ap_def, [theta_pw, alpha_def, beta_def], nlv, freq_res) # Changes in oscillation power - high band beta_pw = beta_def.copy(); beta_pw[pw_ind] = 0.1403 freqs, ps_be_pw = gen_power_spectrum(freq_range, ap_def, [theta_def, alpha_def, beta_pw], nlv, freq_res) ## BW # Changes in oscillation bandwidth - low band theta_bw = theta_def.copy(); theta_bw[bw_ind] = 1.61 freqs, ps_th_bw = gen_power_spectrum(freq_range, ap_def, [theta_bw, alpha_def, beta_def], nlv, freq_res) # Changes in oscillation bandwidth - high band beta_bw = beta_def.copy(); beta_bw[bw_ind] = 0.609 freqs, ps_be_bw = gen_power_spectrum(freq_range, ap_def, [theta_def, alpha_def, beta_bw], nlv, freq_res) # Changes in other band - center frequency alpha_cf = alpha_def.copy(); alpha_cf[cf_ind] = 8.212 freqs, ps_al_cf = gen_power_spectrum(freq_range, ap_def, [theta_def, alpha_cf, beta_def], nlv, freq_res) # Changes in other band - bandwidth alpha_bw = alpha_def.copy(); alpha_bw[bw_ind] = 1.8845 freqs, ps_al_bw = gen_power_spectrum(freq_range, ap_def, [theta_def, alpha_bw, beta_def], nlv, freq_res) # Collect all the power spectra together spectra_data = {'Theta Frequency' : None, 'Theta Power' : ps_th_pw, 'Theta Bandwidth' : ps_th_bw, 'Alpha Frequency' : ps_al_cf, 'Alpha Power' : None, 'Alpha Bandwidth' : ps_al_bw, 'Beta Frequency' : ps_be_cf, 'Beta Power' : ps_be_pw, 'Beta Bandwidth' : ps_be_bw} # Calcualte theta beta ratio of the baseline power spectrum base_br = calc_band_ratio(freqs, ps_base, theta_band, beta_band) # Calculate changes in theta / beta ratios diffreqs = {} for label, spectra in spectra_data.items(): if np.all(spectra): comp_br = calc_band_ratio(freqs, spectra, theta_band, beta_band) diffreqs[label] = base_br - comp_br # Check the computed ratio values of each spectrum print('TBR of base spectrum is: {:1.3f}'.format(base_br)) print('TBR of comp spectrum is: {:1.3f}'.format(comp_br)) # Check TBR difference measures from periodic changes for label, diff in diffreqs.items(): print('TBR difference from {:20} is \t {:1.3f}'.format(label, diff)) # Create figure of periodic changes title_settings = {'fontsize': 16, 'fontweight': 'bold'} fig, ax = plt.subplots(3, 3, figsize=(15, 14)) for axis, (title, data) in zip(ax.flatten(), spectra_data.items()): if not np.all(data): continue plot_spectra_shading(freqs, [ps_base, data], [theta_band, beta_band], shade_colors=shade_color, log_freqs=False, log_powers=True, ax=axis) if PLOT_TITLES: axis.set_title(title, **title_settings) axis.set_xlim([0, 35]) axis.set_ylim([-1.75, 0]) axis.xaxis.label.set_visible(False) axis.yaxis.label.set_visible(False) # Turn off empty axes ax[0, 0].axis('off') ax[1, 1].axis('off') fig.subplots_adjust(hspace=.3) fig.subplots_adjust(wspace=.3) if SAVE_FIG: plt.savefig(fp.make_file_path(fp.demo, 'Underdetermined-Periodic', 'pdf')) ``` Each panel above plots two PSDs, where the blue curve is the same reference power spectrum plotted in all panels, and the orange is a unique comparison spectrum. 
The difference between TBR from the blue and orange curve is the same (see cell above) across each panel. This shows that multiple spectral parameters could change to arrive at identical differences in a ratio measure. #### Periodic Notes Note that for a given change (or direction of change) in theta / beta ratio (TBR), there is only one center frequency change that could do it. This is true for the case, as is simulated, in which the 'baseline' spectrum has oscillations entirely within band ranges. In this example, the change is a relative increase in 'theta', and there is no way to increase relative theta by changing theta CF alone. This is due to the choice of comparison spectrum, and in another scenario, theta CF could also change measured ratio measures. ### Aperiodic Changes The same change in ratio can also be driven from changes in aperiodic properties. This can happen with or without oscillations even being present. ``` # Change in aperiodic exponent ap_shift = [0.13, 1.1099] freqs, ps_ap_ex = gen_power_spectrum(freq_range, ap_shift, [theta_def, alpha_def, beta_def], nlv, freq_res) # Use a new base and transformation, without any oscillations freqs, ps_new_base = gen_power_spectrum(freq_range, ap_def, [], nlv, freq_res) ap_shift = [0.13, 1.1417] freqs, ps_new_apch = gen_power_spectrum(freq_range, ap_shift, [], nlv, freq_res) # Calculate the differences in ratio from baseline spectra d_ap_osc = base_br - calc_band_ratio(freqs, ps_ap_ex, theta_band, beta_band) d_ap_no_osc = calc_band_ratio(freqs, ps_new_base, theta_band, beta_band) - \ calc_band_ratio(freqs, ps_new_apch, theta_band, beta_band) # Check TBR difference measures from aperiodic changes base_text = 'TBR difference from the aperiodic component ' print(base_text + 'with oscillations is \t {:1.3f}'.format(d_ap_osc)) print(base_text + 'without oscillations is \t {:1.3f}'.format(d_ap_no_osc)) # Collect together components to plot ap_bases = [ps_base, ps_new_base] ap_diffs = [ps_ap_ex, ps_new_apch] # Create aperiodic differences figure fig, ax = plt.subplots(2, 1, figsize=(5, 9)) for ps_base, ps_diff, axis in zip(ap_bases, ap_diffs, ax.flatten()): plot_spectra_shading(freqs, [ps_base, ps_diff], [theta_band, beta_band], shade_colors=shade_color, log_freqs=False, log_powers=True, ax=axis) if PLOT_TITLES: axis.set_title('Aperiodic Exponent', **title_settings) # Plot Aesthetics axis.set_xlim([0, 35]) axis.set_ylim([-1.75, 0]) axis.xaxis.label.set_visible(False) axis.yaxis.label.set_visible(False) fig.subplots_adjust(wspace=.3) if SAVE_FIG: plt.savefig(fp.make_file_path(fp.demo, 'Underdetermined-Aperiodic', 'pdf')) ``` #### Conclusions In this example, we have explored changes to measured band ratios by varying different spectral parameters. Given an observed change in a BandRatio measure, there is no way to tell what has actually changed. Variations in multiple spectral parameters can lead to the exact same change in ratio measure. There is no reason to think the change even reflects oscillatory activity, given that aperiodic shifts can drive this effect. In this notebook, we simulated variations in one parameter at a time, but in practice, all of these changes could happen together. In subsequent notebooks, we will further characterize these findings by simulating changes in each parameter, to estimate how impactful different parameters are to ratio measures, as well as by simulating concurrent changes in multiple parameters, to explore the interaction between changes. 
## Same Ratio, Different Spectra So far we have seen how multiple possible changes in power spectra can lead to the same measured difference in band ratio measures across power spectra. What if we calculate band ratio measures and find that they are the same? Can we infer that the analyzed power spectra are in some ways equivalent? Next, let's examine if and how different power spectra can have the same band ratio value. ``` # Create a collection of spectra with different properties, with the same measured ratio value freqs, ps1 = gen_power_spectrum(freq_range, [0, 0.9059], [theta_def, alpha_def, beta_def], nlv, freq_res) freqs, ps2 = gen_power_spectrum(freq_range, [0, 0.9059], [[6, 0.5, 2], alpha_def, [25, 0.3544, 5]], nlv, freq_res) freqs, ps3 = gen_power_spectrum(freq_range, [0.25, 1.2029], [[6, 0.10, 1], alpha_def, beta_def], nlv, freq_res) freqs, ps4 = gen_power_spectrum(freq_range, [0.25, 1.2029], [theta_def, alpha_def, [25, 0.66444, 1.5]], nlv, freq_res) # Collect the generated spectra together spectra_list = [ps1, ps2, ps3, ps4] # Calculate the ratio value for each spectrum for spectrum in spectra_list: print('Ratio value:\t {:1.3f}'.format(calc_band_ratio(freqs, ps1, theta_band, beta_band))) # Plot all the power spectra together plot_spectra_shading(freqs, spectra_list, [theta_band, beta_band], shade_colors=shade_color, linewidth=3, log_freqs=False, log_powers=True) if SAVE_FIG: plt.savefig(fp.make_file_path(fp.demo, 'EquivalentRatioSpectra', 'pdf')) ``` In the plot above, we can see four different power spectra. However, each of these power spectra has the exact same measured theta / beta ratio value. Thus we can conclude that measuring the same band ratio value for different power spectra should not be taken to imply that they are in any way equivalent.
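As a closing illustration (an addition beyond the original analysis), spectral parameterization does distinguish these spectra: fitting the `FOOOF` object imported at the top of the notebook recovers different aperiodic parameters for spectra that share the exact same ratio value.

```
# Fit each equal-ratio spectrum and compare the recovered aperiodic parameters.
fm = FOOOF(verbose=False)
for spectrum in spectra_list:
    fm.fit(freqs, spectrum)
    print('Aperiodic (offset, exponent):', np.round(fm.aperiodic_params_, 3))
```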
github_jupyter
# Python Data Analysis ## Introduction In this lab, we'll make use of everything we've learned about pandas, data cleaning, and simple data analysis. In order to complete this lab, you'll have to import, clean, combine, reshape, and visualize data to answer questions provided, as well as your own questions! ## Objectives You will be able to: - Practice opening and inspecting the contents of CSVs using pandas dataframes - Practice identifying and handling missing values - Practice identifying and handling invalid values - Practice cleaning text data by removing whitespace and fixing typos - Practice joining multiple dataframes ## Your Task: Clean the Superheroes Dataset with Pandas ### Data Understanding In this lab, we'll work with a version of the comprehensive Superheroes Dataset, which can be found on [Kaggle](https://www.kaggle.com/claudiodavi/superhero-set/data) and was originally scraped from [SuperHeroDb](https://www.superherodb.com/). We have modified the structure and contents of the dataset somewhat for the purposes of this lab. Note that this data was collected in June 2017, so it may not reflect the most up-to-date superhero lore. The data is contained in two separate CSV files: 1. `heroes_information.csv`: each record represents a superhero, with attributes of that superhero (e.g. eye color). Height is measured in centimeters, and weight is measured in pounds. 2. `super_hero_powers.csv`: each record represents a superpower, then has True/False values representing whether each superhero has that power ### Business Understanding The business questions you have been provided are: 1. What is the distribution of superheroes by publisher? 2. What is the relationship between height and number of superpowers? And does this differ based on gender? 3. What are the 5 most common superpowers in Marvel Comics vs. DC Comics? This lab also simulates something you are likely to encounter at some point or another in your career in data science: someone has given you access to a dataset, as well as a few questions, and has told you to "find something interesting". So, in addition to completing the basic data cleaning tasks and the aggregation and reshaping tasks needed to answer the provided questions, you will also need to formulate a question of your own and perform any additional cleaning/aggregation/reshaping that is needed to answer it. ### Requirements #### 1. Load the Data with Pandas Create a dataframes `heroes_df` and `powers_df` that represent the two CSV files. Use pandas methods to inspect the shape and other attributes of these dataframes. #### 2. Perform Data Cleaning Required to Answer First Question The first question is: *What is the distribution of superheroes by publisher?* In order to answer this question, you will need to: * Identify and handle missing values * Identify and handle text data requiring cleaning #### 3. Perform Data Aggregation and Cleaning Required to Answer Second Question The second question is: *What is the relationship between height and number of superpowers? And does this differ based on gender?* In order to answer this question, you will need to: * Join the dataframes together * Identify and handle invalid values #### 4. Perform Data Aggregation Required to Answer Third Question The third question is: *What are the 5 most common superpowers in Marvel Comics vs. DC Comics?* This should not require any additional data cleaning or joining of tables, but it will require some additional aggregation. #### 5. 
Formulate and Answer Your Own Question This part is fairly open-ended. Think of a question that can be answered with the available data, and perform any cleaning or aggregation required to answer that question. ## 1. Load the Data with Pandas In the cell below, we: * Import and alias `pandas` as `pd` * Import and alias `numpy` as `np` * Import and alias `seaborn` as `sns` * Import and alias `matplotlib.pyplot` as `plt` * Set Matplotlib visualizations to display inline in the notebook ``` # Run this cell without changes import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt %matplotlib inline ``` ### Superheroes In the cell below, load `heroes_information.csv` as `heroes_df`: ``` # Your code here heroes_df.head() ``` It looks like that CSV came with an index column, resulting in an extra column called `Unnamed: 0`. We don't need that column, so write code to get rid of it below. There are two ways to do this: 1. Re-load with `read_csv`, and specify the parameter `index_col=0` 2. Drop the column `Unnamed: 0` with `axis=1` ``` # Your code here heroes_df.head() ``` The following code checks that the dataframe was loaded correctly. ``` # Run this cell without changes # There should be 734 rows assert heroes_df.shape[0] == 734 # There should be 10 columns. If this fails, make sure you got rid of # the extra index column assert heroes_df.shape[1] == 10 # These should be the columns assert list(heroes_df.columns) == ['name', 'Gender', 'Eye color', 'Race', 'Hair color', 'Height', 'Publisher', 'Skin color', 'Alignment', 'Weight'] ``` Now you want to get familiar with the data. This step includes: * Understanding the dimensionality of your dataset * Investigating what type of data it contains, and the data types used to store it * Discovering how missing values are encoded, and how many there are * Getting a feel for what information it does and doesn't contain In the cell below, inspect the overall shape of the dataframe: ``` # Your code here ``` Now let's look at the info printout: ``` # Run this cell without changes heroes_df.info() ``` In the cell below, interpret that information. Do the data types line up with what we expect? Are there any missing values? ``` # Replace None with appropriate text """ None """ ``` ### Superpowers Now, repeat the same process with `super_hero_powers.csv`. Name the dataframe `powers_df`. This time, make sure you use `index_col=0` when opening the CSV because the index contains important information. ``` # Your code here (create more cells as needed) ``` The following code will check if it was loaded correctly: ``` # Run this cell without changes # There should be 167 rows, 667 columns assert powers_df.shape == (167, 667) # The first column should be '3-D Man' assert powers_df.columns[0] == '3-D Man' # The last column should be 'Zoom' assert powers_df.columns[-1] == 'Zoom' # The first index should be 'Agility' assert powers_df.index[0] == 'Agility' # The last index should be 'Omniscient' assert powers_df.index[-1] == 'Omniscient' ``` ## 2. Perform Data Cleaning Required to Answer First Question Recall that the first question is: *What is the distribution of superheroes by publisher?* To answer this question, we will only need to use `heroes_df`, which contains the `Publisher` column. ### Identifying and Handling Missing Values As you likely noted above, the `Publisher` column is missing some values. 
Let's take a look at some samples with and without missing publisher values: ``` # Run this cell without changes has_publisher_sample = heroes_df[heroes_df["Publisher"].notna()].sample(5, random_state=1) has_publisher_sample # Run this cell without changes missing_publisher_sample = heroes_df[heroes_df["Publisher"].isna()].sample(5, random_state=1) missing_publisher_sample ``` What do we want to do about these missing values? Recall that there are two general strategies for dealing with missing values: 1. Fill in missing values (either using another value from the column, e.g. the mean or mode, or using some other value like "Unknown") 2. Drop rows with missing values Write your answer below, and explain how it relates to the information we have: ``` # Replace None with appropriate text """ None """ ``` Now, implement your chosen strategy using code. (You can also check the solution branch for the answer to the question above if you're really not sure.) ``` # Your code here ``` Now there should be no missing values in the publisher column: ``` # Run this cell without changes assert heroes_df["Publisher"].isna().sum() == 0 ``` ### Identifying and Handling Text Data Requiring Cleaning The overall field of natural language processing (NLP) is quite broad, and we're not going to get into any advanced text processing, but it's useful to be able to clean up minor issues in text data. Let's take a look at the counts of heroes grouped by publisher: ``` # Run this cell without changes heroes_df["Publisher"].value_counts() ``` There are two cases where we appear to have data entry issues, and publishers that should be encoded the same have not been. In other words, there are four categories present that really should be counted as two categories (and you do not need specific comic book knowledge to be able to identify them). Identify those two cases below: ``` # Replace None with appropriate text """ None """ ``` Now, write some code to handle these cases. If you're not sure where to start, look at the pandas documentation for [replacing values](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.replace.html) and [stripping off whitespace](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.strip.html). ``` # Your code here ``` Check your work below: ``` # Run this cell without changes heroes_df["Publisher"].value_counts() ``` ### Answering the Question Now we should be able to answer *What is the distribution of superheroes by publisher?* If your data cleaning was done correctly, this code should work without any further changes: ``` # Run this cell without changes # Set up plots fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(16, 5)) # Create variables for easier reuse value_counts = heroes_df["Publisher"].value_counts() top_5_counts = value_counts.iloc[:5] # Plot data ax1.bar(value_counts.index, value_counts.values) ax2.bar(top_5_counts.index, top_5_counts.values) # Customize appearance ax1.tick_params(axis="x", labelrotation=90) ax2.tick_params(axis="x", labelrotation=45) ax1.set_ylabel("Count of Superheroes") ax2.set_ylabel("Count of Superheroes") ax1.set_title("Distribution of Superheroes by Publisher") ax2.set_title("Top 5 Publishers by Count of Superheroes"); ``` ## 3. Perform Data Aggregation and Cleaning Required to Answer Second Question Recall that the second question is: *What is the relationship between height and number of superpowers? 
And does this differ based on gender?* Unlike the previous question, we won't be able to answer this with just `heroes_df`, since information about height is contained in `heroes_df`, while information about superpowers is contained in `powers_df`. ### Joining the Dataframes Together First, identify the shared key between `heroes_df` and `powers_df`. (Shared key meaning, the values you want to join on.) Let's look at them again: ``` # Run this cell without changes heroes_df # Run this cell without changes powers_df ``` In the cell below, identify the shared key, and your strategy for joining the data (e.g. what will one record represent after you join, will you do a left/right/inner/outer join): ``` # Replace None with appropriate text """ None """ ``` In the cell below, create a new dataframe called `heroes_and_powers_df` that contains the joined data. You can look at the above answer in the solution branch if you're not sure where to start. ***Hint:*** Note that the `.join` method requires that the two dataframes share an index ([documentation here](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.join.html)) whereas the `.merge` method can join using any columns ([documentation here](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.merge.html)). It is up to you which one you want to use. ``` # Your code here (create more cells as needed) ``` Run the code below to check your work: ``` # Run this cell without changes # Confirms you have created a dataframe with the specified name assert type(heroes_and_powers_df) == pd.DataFrame # Confirms you have the right number of rows assert heroes_and_powers_df.shape[0] == 647 # Confirms you have the necessary columns # (If you modified the value of powers_df along the way, you might need to # modify this test. We are checking that all of the powers are present as # columns.) assert [power in heroes_and_powers_df.columns for power in powers_df.index] # (If you modified the value of heroes_df along the way, you mgith need to # modify this as well. We are checking that all of the attribute columns from # heroes_df are present as columns in the joined df) assert [attribute in heroes_and_powers_df.columns for attribute in heroes_df.columns] ``` Now that we have created a joined dataframe, we can aggregate the number of superpowers by superhero. This code is written for you: ``` # Run this cell without changes # Note: we can use sum() with True and False values and they will # automatically be cast to 1s and 0s heroes_and_powers_df["Power Count"] = sum([heroes_and_powers_df[power_name] for power_name in powers_df.index]) heroes_and_powers_df ``` ### Answering the Question Now we can plot the height vs. the count of powers: ``` # Run this cell without changes fig, ax = plt.subplots(figsize=(16, 8)) ax.scatter( x=heroes_and_powers_df["Height"], y=heroes_and_powers_df["Power Count"], alpha=0.3 ) ax.set_xlabel("Height (cm)") ax.set_ylabel("Number of Superpowers") ax.set_title("Height vs. Power Count"); ``` Hmm...what is that stack of values off below zero? What is a "negative" height? ### Identifying and Handling Invalid values One of the trickier tasks in data cleaning is identifying invalid or impossible values. In these cases, you have to apply your domain knowledge rather than any particular computational technique. 
For example, if you were looking at data containing dates of past home sales, and one of those dates was 100 years in the future, pandas wouldn't flag that as an issue, but you as a data scientist should be able to identify it. In this case, we are looking at heights, which are 1-dimensional, positive numbers. In theory we could have a very tiny height close to 0 cm because the hero is microscopic, but it does not make sense that we would have a height below zero. Let's take a look at a sample of those negative heights: ``` # Run this cell without changes heroes_and_powers_df[heroes_and_powers_df["Height"] < 0].sample(5, random_state=1) ``` It looks like not only are those heights negative, those weights are negative also, and all of them are set to exactly -99.0. It seems like this data source probably filled in -99.0 as the height or weight whenever it was unknown, instead of just leaving it as NaN. Depending on the purpose of the analysis, maybe this would be a useful piece of information, but for our current question, let's go ahead and drop the records where the height is -99.0. We'll make a new temporary dataframe to make sure we don't accidentally delete anything that will be needed in a future question. ``` # Run this cell without changes question_2_df = heroes_and_powers_df[heroes_and_powers_df["Height"] != -99.0].copy() question_2_df ``` ### Answering the Question, Again Now we can redo that plot without those negative heights: ``` # Run this cell without changes fig, ax = plt.subplots(figsize=(16, 8)) ax.scatter( x=question_2_df["Height"], y=question_2_df["Power Count"], alpha=0.3 ) ax.set_xlabel("Height (cm)") ax.set_ylabel("Number of Superpowers") ax.set_title("Height vs. Power Count"); ``` Ok, that makes more sense. It looks like there is not much of a relationship between height and number of superpowers. Now we can go on to answering the second half of question 2: *And does this differ based on gender?* To indicate multiple categories within a scatter plot, we can use color to add a third dimension: ``` # Run this cell without changes fig, ax = plt.subplots(figsize=(16, 8)) # Select subsets question_2_male = question_2_df[question_2_df["Gender"] == "Male"] question_2_female = question_2_df[question_2_df["Gender"] == "Female"] question_2_other = question_2_df[(question_2_df["Gender"] != "Male") & (question_2_df["Gender"] != "Female")] # Plot data with different colors ax.scatter( x=question_2_male["Height"], y=question_2_male["Power Count"], alpha=0.5, color="cyan", label="Male" ) ax.scatter( x=question_2_female["Height"], y=question_2_female["Power Count"], alpha=0.5, color="gray", label="Female" ) ax.scatter( x=question_2_other["Height"], y=question_2_other["Power Count"], alpha=0.5, color="yellow", label="Other" ) # Customize appearance ax.set_xlabel("Height (cm)") ax.set_ylabel("Number of Superpowers") ax.set_title("Height vs. Power Count") ax.legend(); ``` It appears that there is still no clear relationship between count of powers and height, regardless of gender. We do however note that "Male" is the most common gender, and that male superheroes tend to be taller, on average. ## 4. Perform Data Aggregation Required to Answer Third Question Recall that the third question is: *What are the 5 most common superpowers in Marvel Comics vs. DC Comics?* We'll need to keep using `heroes_and_powers_df` since we require information from both `heroes_df` and `powers_df`. 
Your resulting `question_3_df` should contain aggregated data, with columns `Superpower Name`, `Marvel Comics` (containing the count of occurrences in Marvel Comics), and `DC Comics` (containing the count of occurrences in DC Comics). Each row should represent a superpower. In other words, `question_3_df` should look like this: ![question 3 df](images/question_3.png) Don't worry if the rows or columns are in a different order, all that matters is that you have the right rows and columns with all the data. ***Hint:*** refer to the [documentation for `.groupby`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.groupby.html) and treat each publisher as a group. ``` # Your code here (create more cells as needed) ``` The code below checks that you have the correct dataframe structure: ``` # Run this cell without changes # Checking that you made a dataframe called question_3_df assert type(question_3_df) == pd.DataFrame # Checking the shape assert question_3_df.shape == (167, 3) # Checking the column names assert sorted(list(question_3_df.columns)) == ['DC Comics', 'Marvel Comics', 'Superpower Name'] ``` ### Answering the Question The code below uses the dataframe you created to find and plot the most common superpowers in Marvel Comics and DC Comics. ``` # Run this cell without changes marvel_most_common = question_3_df.drop("DC Comics", axis=1) marvel_most_common = marvel_most_common.sort_values(by="Marvel Comics", ascending=False)[:5] marvel_most_common # Run this cell without changes dc_most_common = question_3_df.drop("Marvel Comics", axis=1) dc_most_common = dc_most_common.sort_values(by="DC Comics", ascending=False)[:5] dc_most_common # Run this cell without changes fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(15, 5)) ax1.bar( x=marvel_most_common["Superpower Name"], height=marvel_most_common["Marvel Comics"] ) ax2.bar( x=dc_most_common["Superpower Name"], height=dc_most_common["DC Comics"] ) ax1.set_ylabel("Count of Superheroes") ax2.set_ylabel("Count of Superheroes") ax1.set_title("Frequency of Top Superpowers in Marvel Comics") ax2.set_title("Frequency of Top Superpowers in DC Comics"); ``` It looks like super strength is the most popular power in both Marvel Comics and DC Comics. Overall, the top 5 powers are fairly similar — 4 out of 5 overlap, although Marvel contains agility whereas DC contains flight. ## 5. Formulate and Answer Your Own Question For the remainder of this lab, you'll be focusing on coming up with and answering your own question, just like we did above. Your question should not be overly simple, and should require both descriptive statistics and data visualization to answer. In case you're unsure of what questions to ask, some sample questions have been provided below. Pick one of the following questions to investigate and answer, or come up with one of your own! * Which powers have the highest chance of co-occurring in a hero (e.g. super strength and flight)? * What is the distribution of skin colors amongst alien heroes? * How are eye color and hair color related in this dataset? Explain your question below: ``` # Replace None with appropriate text: """ None """ ``` Some sample cells have been provided to give you room to work. Feel free to create more cells as needed. Be sure to include thoughtful, well-labeled visualizations to back up your analysis! ## Summary In this lab, you demonstrated your mastery of using pandas to clean and aggregate data in order to answer several business questions. 
This included identifying and handling missing values, text requiring preprocessing, and invalid values. You also performed aggregation and reshaping tasks such as transposing, joining, and grouping data. Great job, there was a lot here!
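As a closing aside on the invalid-value step above: instead of dropping the rows flagged with the -99.0 placeholder, another common option is to convert the sentinel into a proper missing value so that later aggregations simply ignore it. A minimal sketch, assuming `heroes_and_powers_df` from earlier is still in memory:

```
# Alternative to dropping rows: treat the -99.0 sentinel as missing data
import numpy as np

cleaned_df = heroes_and_powers_df.replace(-99.0, np.nan)
cleaned_df["Height"].isna().sum()  # how many heights were actually unknown
```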
github_jupyter
Derived from https://arxiv.org/pdf/1711.07128.pdf ``` import warnings warnings.filterwarnings("ignore") import sys import os import tensorflow as tf # sys.path.append("../libs") sys.path.insert(1, '../') from libs import input_data from libs import models from libs import trainer from libs import freeze flags=tf.app.flags flags=tf.app.flags #Important Directories flags.DEFINE_string('data_dir','..\\..\\_inputs\\raw','Train Data Folder') flags.DEFINE_string('summaries_dir','..\\..\\summaries','Summaries Folder') flags.DEFINE_string('train_dir','..\\..\\logs&checkpoint','Directory to write event logs and checkpoint') flags.DEFINE_string('models_dir','..\\..\\models','Models Folder') #Task Specific Parameters flags.DEFINE_string('wanted_words','yes,no,up,down,left,right,on,off,stop,go','Wanted Words') flags.DEFINE_float('validation_percentage',10,'Validation Percentage') flags.DEFINE_float('testing_percentage',10,'Testing Percentage') flags.DEFINE_integer('sample_rate',16000,'Sample Rate') flags.DEFINE_integer('clip_duration_ms',1000,'Clip Duration in ms') flags.DEFINE_float('window_size_ms',40,'How long each spectogram timeslice is') flags.DEFINE_float('window_stride_ms',20.0,'How far to move in time between frequency windows.') flags.DEFINE_integer('dct_coefficient_count',40,'How many bins to use for the MFCC fingerprint') flags.DEFINE_float('time_shift_ms',100.0,'Range to randomly shift the training audio by in time.') FLAGS=flags.FLAGS model_architecture='ds_cnn' start_checkpoint=None logging_interval=10 eval_step_interval=1000 save_step_interval=1 silence_percentage=10.0 unknown_percentage=10.0 background_frequency=0.8 background_volume=0.3 learning_rate='0.0005,0.0001,0.00002' #Always seperated by comma, trains with each of the learning rate for the given number of iterations train_steps='1000,1000,1000' #Declare the training steps for which the learning rates will be used batch_size=256 model_size_info=[5, 64, 10, 4, 2, 2, 64, 3, 3, 1, 1, 64, 3, 3, 1, 1, 64, 3, 3, 1, 1, 64, 3, 3, 1, 1] remaining_args = FLAGS([sys.argv[0]] + [flag for flag in sys.argv if flag.startswith("--")]) assert(remaining_args == [sys.argv[0]]) train_dir=os.path.join(FLAGS.data_dir,'train','audio') model_settings = models.prepare_model_settings( len(input_data.prepare_words_list(FLAGS.wanted_words.split(','))), FLAGS.sample_rate, FLAGS.clip_duration_ms, FLAGS.window_size_ms, FLAGS.window_stride_ms, FLAGS.dct_coefficient_count) audio_processor = input_data.AudioProcessor( train_dir, silence_percentage, unknown_percentage, FLAGS.wanted_words.split(','), FLAGS.validation_percentage, FLAGS.testing_percentage, model_settings,use_silence_folder=True) def get_train_data(args): sess=args time_shift_samples = int((FLAGS.time_shift_ms * FLAGS.sample_rate) / 1000) train_fingerprints, train_ground_truth = audio_processor.get_data( batch_size, 0, model_settings,background_frequency, background_volume, time_shift_samples, 'training', sess) return train_fingerprints,train_ground_truth def get_val_data(args): ''' Input: (sess,offset) ''' sess,i=args validation_fingerprints, validation_ground_truth = ( audio_processor.get_data(batch_size, i, model_settings, 0.0, 0.0, 0, 'validation', sess)) return validation_fingerprints,validation_ground_truth # def get_test_data(args): # ''' # Input: (sess,offset) # ''' # sess,i=args # test_fingerprints, test_ground_truth = audio_processor.get_data( # batch_size, i, model_settings, 0.0, 0.0, 0, 'testing', sess) # return test_fingerprints,test_ground_truth def main(_): 
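    # main() builds the input/label placeholders, instantiates the DS-CNN model via
    # models.create_model, and then hands training off to libs.trainer.train using
    # the data-fetching helpers defined above.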
sess=tf.InteractiveSession() # Placeholders fingerprint_size = model_settings['fingerprint_size'] label_count = model_settings['label_count'] fingerprint_input = tf.placeholder( tf.float32, [None, fingerprint_size], name='fingerprint_input') ground_truth_input = tf.placeholder( tf.float32, [None, label_count], name='groundtruth_input') set_size = audio_processor.set_size('validation') label_count = model_settings['label_count'] # Create Model logits, dropout_prob = models.create_model( fingerprint_input, model_settings, model_architecture, model_size_info=model_size_info, is_training=True) #Start Training extra_args=(dropout_prob,label_count,batch_size,set_size) trainer.train(sess,logits,fingerprint_input,ground_truth_input,get_train_data, get_val_data,train_steps,learning_rate,eval_step_interval, logging_interval=logging_interval, start_checkpoint=start_checkpoint,checkpoint_interval=save_step_interval, model_name=model_architecture,train_dir=FLAGS.train_dir, summaries_dir=FLAGS.summaries_dir,args=extra_args) tf.app.run(main=main) # save_checkpoint='..\\..\\logs&checkpoint\\ds_cnn\\ckpt-899' # save_path=os.path.join(FLAGS.models_dir,model_architecture,'%s.pb'%os.path.basename(save_checkpoint)) # freeze.freeze_graph(FLAGS,model_architecture,save_checkpoint,save_path,model_size_info=model_size_info) # save_path=os.path.join(FLAGS.models_dir,model_architecture,'%s-small-batched.pb'%os.path.basename(save_checkpoint)) # freeze.freeze_graph(FLAGS,model_architecture,save_checkpoint,save_path,batched=True,model_size_info=model_size_info) ```
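The `learning_rate` and `train_steps` strings set above encode a staged schedule: each learning rate is used for the corresponding number of training steps. The actual parsing happens inside `libs.trainer`, so the following is only a standalone sketch of the idea, not that library's code:

```
# Sketch: expand the comma-separated schedule strings into (learning rate, steps) stages
stage_lrs = [float(lr) for lr in learning_rate.split(',')]
stage_steps = [int(s) for s in train_steps.split(',')]
assert len(stage_lrs) == len(stage_steps)

for stage, (lr, steps) in enumerate(zip(stage_lrs, stage_steps), start=1):
    print('Stage %d: %d steps at learning rate %g' % (stage, steps, lr))
```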
github_jupyter
# "E is for Exploratory Data Analysis: Categorical Data" > What is Exploratory Data Analysis (EDA), why is it done, and how do we do it in Python? - toc: false - badges: True - comments: true - categories: [E] - hide: False - image: images/e-is-for-eda-text/alphabet-close-up-communication-conceptual-278887.jpg ## _What is **Exploratory Data Analysis(EDA)**?_ While I answered these questions in the [last post](https://educatorsrlearners.github.io/an-a-z-of-machine-learning/e/2020/06/15/e-is-for-eda.html), since [all learning is repetition](https://papers.ssrn.com/sol3/papers.cfm?abstract_id=224340), I'll do it again :grin: EDA is an ethos for how we scrutinize data including, but not limited to: - what we look for (i.e. shapes, trends, outliers) - the approaches we employ (i.e. [five-number summary](https://www.statisticshowto.com/how-to-find-a-five-number-summary-in-statistics/), visualizations) - and the decisions we reach{% fn 1 %} ## _Why is it done?_ Two main reasons: 1. If we collected the data ourselves, we need to know if our data suits our needs or if we need to collect more/different data. 2. If we didn't collect the data ourselves, we need to interrogate the data to answer the "5 W's" - __What__ kind of data do we have (i.e. numeric, categorical)? - __When__ was the data collected? There could be more recent data which we could collect which would better inform our model. - __How__ much data do we have? Also, how was the data collected? - __Why__ was the data collected? The original motivation could highlight potential areas of bias in the data. - __Who__ collected the data? Some of these questions can't necessarily be answered by looking at the data alone which is fine because _[nothing comes from nothing](http://parmenides.me/nothing-comes-from-nothing/)_; someone will know the answers so all we have to do is know where to look and whom to ask. ## _How do we do it in Python?_ As always, I'll follow the steps outlined in [_Hands-on Machine Learning with Scikit-Learn, Keras & TensorFlow_](https://github.com/ageron/handson-ml/blob/master/ml-project-checklist.md) ### Step 1: Frame the Problem "Given a set of features, can we determine how old someone needs to be to read a book?" ### Step 2: Get the Data We'll be using the same dataset as in the [previous post](https://educatorsrlearners.github.io/an-a-z-of-machine-learning/e/2020/06/15/e-is-for-eda.html). ### Step 3: Explore the Data to Gain Insights (i.e. EDA) As always, import the essential libraries, then load the data. ``` #hide import warnings; warnings.simplefilter('ignore') #For data manipulation import pandas as pd import numpy as np #For visualization import seaborn as sns import matplotlib.pyplot as plt import missingno as msno url = 'https://raw.githubusercontent.com/educatorsRlearners/book-maturity/master/csv/book_info_complete.csv' df = pd.read_csv(url) ``` To review, ***How much data do we have?*** ``` df.shape ``` - 23 features - one target - 5,816 observations ***What type of data do we have?*** ``` df.info() ``` Looks like mostly categorical with some numeric. Lets take a closer look. ``` df.head().T ``` Again, I collected the data so I know the target is `csm_rating` which is the minimum age Common Sense Media (CSM) says a reader should be for the given book. 
Also, we have essentially three types of features: - Numeric - `par_rating` : Ratings of the book by parents - `kids_rating` : Ratings of the book by children - :dart:`csm_rating` : Ratings of the books by Common Sense Media - `Number of pages` : Length of the book - `Publisher's recommended age(s)`: Self explanatory - Date - `Publication date` : When the book was published - `Last updated`: When the book's information was updated on the website with the rest of the features being categorical and text; these features will be our focus for today. #### Step 3.1 Housekeeping Clean the feature names to make inspection easier. {% fn 3 %} ``` df.columns df.columns = df.columns.str.strip().str.lower().str.replace(' ', '_').str.replace('(', '').str.replace(')', '') df.columns ``` Much better. Now lets subset the data frame so we only have the features of interest. Given there are twice as many text features compared to non-text features, and the fact that I'm ~~lazy~~ efficient, I'll create a list of the features I ***don't*** want ``` numeric = ['par_rating', 'kids_rating', 'csm_rating', 'number_of_pages', "publisher's_recommended_ages", "publication_date", "last_updated"] ``` and use it to keep the features I ***do*** want. ``` df_strings = df.drop(df[numeric], axis=1) ``` _Voila!_ ``` df_strings.head().T ``` Clearly, the non-numeric data falls into two groups: - text - `description` - `plot` - `csm_review` - `need_to_know` - categories - `author`/`authors` - `genre` - `award`/`awards` - etc. Looking at the output above, so many questions come to mind: 1. How many missing values do we have? 2. How long are the descriptions? 3. What's the difference between `csm_review` and `need_to_know`? 4. Similarly, what the difference between `description` and `plot`? 5. How many different authors do we have in the dataset? 6. How many types of books do we have? and I'm sure more will arise once we start. Where to start? Lets answer the easiest questions first :grin: ## Categories #### ***How many missing values do we have?*** A cursory glance at the output above indicates there are potentially a ton of missing values; lets inspect this hunch visually. ``` msno.bar(df_strings, sort='descending'); ``` Hunch confirmed: 10 the 17 columns are missing values with some being practically empty. To get a precise count, we can use `sidetable`.{% fn 2 %} ``` import sidetable df_strings.stb.missing(clip_0=True, style=True) ``` OK, we have lots of missing values and several columns which appear to be measuring similar features (i.e., authors, illustrators, publishers, awards) so lets inspect these features in pairs. ### `author` and `authors` Every book has an author, even if the author is "[Anonymous](https://bookshop.org/a/9791/9781538718469)," so then why do we essentially have two columns for the same thing? :thinking: `author` is for books with a single writer whereas `authors` is for books with multiple authors like [_Good Omens_](https://bookshop.org/a/9791/9780060853983). Let's test that theory. ``` msno.matrix(df_strings.loc[:, ['author', 'authors']]); ``` *Bazinga!* We have a perfect correlation between missing data for `author` and `authors` but lets' have a look just in case. 
``` df_strings.loc[df_strings['author'].isna() & df_strings["authors"].notna(), ['title', 'author', 'authors']].head() df_strings.loc[df_strings['author'].notna() & df_strings["authors"].isna(), ['title', 'author', 'authors']].head() df_strings.loc[df_strings['author'].notna() & df_strings["authors"].notna(), ['title', 'author', 'authors']].head() ``` My curiosity is satiated. Now the question is how to successfully merge the two columns? We could replace the `NaN` in `author` with the: - values in `authors` - word `multiple` - first author in `authors` - more/most popular of the authors in `authors` and I'm sure I could come up with even more if I thought about/Googled it but the key is to understand that no matter what we choose, it will have consequences when we build our model{% fn 3 %}. Next question which comes to mind is: :thinking: ***How many different authors are there?*** ``` df_strings.loc[:, 'author'].nunique() ``` Wow! Nearly half our our observations contain a unique name meaning this feature has [high cardinality](https://www.kdnuggets.com/2016/08/include-high-cardinality-attributes-predictive-model.html). :thinking: ***Which authors are most represented in the data set?*** Lets create a [frequency table](https://www.mathsteacher.com.au/year8/ch17_stat/03_freq/freq.htm) to find out. ``` author_counts = df_strings.loc[:, ["title", 'author']].groupby('author').count().reset_index() author_counts.sort_values('title', ascending=False).head(10) ``` Given that I've scraped the data from a website focusing on children, teens, and young adults, the results above only make sense; authors like [Dr. Seuss](https://bookshop.org/contributors/dr-seuss), [Eoin Coifer](https://bookshop.org/contributors/eoin-colfer-20dba4fd-138e-477e-bca5-75b9fa9bfe2f), and [Lemony Snicket](https://bookshop.org/books?keywords=lemony+snicket) are famous children's authors whereas [Rick Riordan](https://bookshop.org/books?keywords=percy+jackson), [Walter Dean Myers](https://bookshop.org/books?keywords=Walter+Dean+Myers) occupy the teen/young adult space and [Neil Gaiman](https://bookshop.org/contributors/neil-gaiman) writes across ages. :thinking: ***How many authors are only represented once?*** That's easy to check. ``` from matplotlib.ticker import FuncFormatter ax = author_counts['title'].value_counts(normalize=True).nlargest(5).plot.barh() ax.invert_yaxis(); #Set the x-axis to a percentage ax.xaxis.set_major_formatter(FuncFormatter(lambda x, _: '{:.0%}'.format(x))) ``` Wow! So approximately 60% of the authors have one title in our data set. **Why does that matter?** When it comes time to build our model we'll need to either [label encode](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html), [one-hot encode](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OneHotEncoder.html), or [hash](https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.FeatureHasher.html) this feature and whichever we decide to do will end up effecting the model profoundly due to the high [cardinality](https://pkghosh.wordpress.com/2017/10/09/combating-high-cardinality-features-in-supervised-machine-learning/) of this feature; however, we'll deal with all this another time :grin:. ### `illustrator` and `illustrators` Missing values can be quite informative. :thinking: What types of books typically have illustrators? :bulb: Children's books! 
Therefore, if a book's entries for both `illustrator` and `illustrators` is blank, that *probably* means that book doesn't have illustrations which would mean it is *more likely* to be for older children. Let's test this theory in the simplest way I can think of :smile: ``` #Has an illustrator df.loc[df['illustrator'].notna() | df['illustrators'].notna(), ['csm_rating']].hist(); #Doesn't have an illustrator df.loc[df['illustrators'].isna() & df["illustrator"].isna(), ['csm_rating']].hist(); ``` :bulb: *Who* the illustrator is doesn't matter as much as *whether* there is an illustrator. Looks like when I do some feature engineering I'll need to create a `has_illustrator` feature. ### `book_type` and `genre` These two features should be relatively straightforward but we'll have a quick look anyway. `book_type` should be easy because, after a cursory inspection using `head` above, I'd expect to only see 'fiction' or 'non-fiction' but I'll double check. ``` ax_book_type = df_strings['book_type'].value_counts().plot.barh(); ax_book_type.invert_yaxis() ``` Good! The only values I have are the ones I expected but the ratio is highly skewed. :thinking: What impact will this have on our model? `genre` (e.g. fantasy, romance, sci-fi) is a *far* broader topic than `booktype` but how many different genres are represented in the data set? ``` df_strings['genre'].nunique() ``` :roll_eyes: Great What's the breakdown? ``` ax_genre = df_strings['genre'].value_counts().plot.barh(); ax_genre.invert_yaxis() ``` That's not super useful but what if I took 10 most common genres? ``` ax_genre_10 = df_strings['genre'].value_counts(normalize=True).nlargest(10).plot.barh(); ax_genre_10.invert_yaxis() #Set the x axis to percentage ax_genre_10.xaxis.set_major_formatter(FuncFormatter(lambda x, _: '{:.0%}'.format(x))) ``` Hmmm. Looks like approximately half the books fall into one of three genres. :bulb: To reduce dimensionality, recode any genre outside of the top 10 as 'other'. Will save that idea for the feature engineering stage. ### `award` and `awards` Since certain awards (e.g. [The Caldecott Medal](https://cloviscenter.libguides.com/children/caldecott#:~:text=The%20Medal%20shall%20be%20awarded,the%20illustrations%20be%20original%20work.)) are only awarded to children's books whereas others, namely [The RITA Award](https://en.wikipedia.org/wiki/RITA_Award#Winners) is only for "mature" readers. :thinking: Will knowing if a work is an award winner provide insight? :thinking: Which awards are represented? ``` award_ax = df_strings['award'].value_counts().plot.barh() award_ax.invert_yaxis(); awards_ax = df_strings['awards'].str.split(",").explode().str.strip().value_counts().plot.barh() awards_ax.invert_yaxis() ``` Hmmmmm. The Caldecott Medal is for picture books so that should mean the target readers are very young; however, we've already seen that "picture books" is the second most common value in `genre` so being a Caldecott Medal winner won't add much. Also, to be eligible for the other awards, a book needs to be aimed a t children 14 or below so that doesn't really tell us much either. Conclusion: drop this feature. While I could keep going and analyze `publisher`, `publishers`, and `available_on`, I'd be using the exact same techniques as above so, instead, time to move on to... ## Text ### `description`, `plot`, `csm_review`, `need_to_know` Now for some REALLY fun stuff! :thinking: How long are each of these observations? 
Trying to be as efficient as possible, I'll: - make a list of the features I want ``` variables = ['description', 'plot', 'csm_review', 'need_to_know'] ``` - write a function to: - convert the text to lowercase - tokenize the text and remove [stop words](https://en.wikipedia.org/wiki/Stop_words) - identify the length of each feature ``` from nltk import word_tokenize from nltk.corpus import stopwords stop = stopwords.words('english') def text_process(df, feature): df.loc[:, feature+'_tokens'] = df.loc[:, feature].apply(str.lower) df.loc[:, feature+'_tokens'] = df.loc[:, feature+'_tokens'].apply(lambda x: [item for item in x.split() if item not in stop]) df.loc[:, feature+'_len'] = df.loc[:, feature+'_tokens'].apply(len) return df ``` - loop through the list of variables saving it to the data frame ``` for var in variables: df_text = text_process(df_strings, var) df_text.iloc[:, -8:].head() ``` :thinking: `description` seems to be significantly shorter than the other three. Let's plot them to investigate. ``` len_columns = df_text.columns.str.endswith('len') df_text.loc[:,len_columns].hist(); plt.tight_layout() ``` Yep - `description` is significantly shorter but how do the other three compare. ``` columns = ['plot_len', 'need_to_know_len', 'csm_review_len'] df_text[columns].plot.box() plt.xticks(rotation='vertical'); ``` Hmmm. Lots of outliers for `csm_review` but, in general, the three features are of similar lengths. ### Next Steps While I could create [word clouds](https://www.datacamp.com/community/tutorials/wordcloud-python) to visualize the most frequent words for each feature, or calculate the [sentiment](https://towardsdatascience.com/a-complete-exploratory-data-analysis-and-visualization-for-text-data-29fb1b96fb6a) of each feature, my stated goal is to identify how old someone should be to read a book and not whether a review is good or bad. To that end, my curiosity about these features is satiated so I'm ready to move on to another chapter. ## Summary - :ballot_box_with_check: numeric data - :ballot_box_with_check: categorical data - :black_square_button: images (book covers) Two down; one to go! Going forward, my key points to remember are: ### What type of categorical data do I have? There is a huge difference between ordered (i.e. "bad", "good", "great") and truly nominal data that has no order/ranking like different genres; just because ***I*** prefer science fiction to fantasy, it doesn't mean it actually ***is*** superior. ### Are missing values really missing? Several of the features had missing values which were, in fact, not truly missing; for example, the `award` and `awards` features were mostly blank for a very good reason: the book didn't win one of the four awards recognized by Common Sense Media. In conclusion, both of the points above can be summarized simply by as "be sure to get to know your data." Happy coding! 
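P.S. To make the ordered-versus-nominal distinction above concrete, here is a tiny sketch with made-up values (not the book data): pandas can carry the ordering for ordinal categories, while nominal ones are better one-hot encoded.

```
import pandas as pd

# Ordinal: the categories have a meaningful order
quality = pd.Categorical(['bad', 'great', 'good', 'bad'],
                         categories=['bad', 'good', 'great'], ordered=True)
print(quality.codes)  # [0 2 1 0], codes respect the declared order

# Nominal: genres have no inherent order, so one-hot encode instead
print(pd.get_dummies(pd.Series(['fantasy', 'sci-fi', 'fantasy'])))
```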
#### Footnotes {{ 'Adapted from [_Engineering Statistics Handbook_](https://www.itl.nist.gov/div898/handbook/eda/section1/eda11.htm)' | fndetail: 1 }} {{ 'Be sure to check out this excellent [post](https://beta.deepnote.com/article/sidetable-pandas-methods-you-didnt-know-you-needed) by Jeff Hale for more examples on how to use this package' | fndetail: 2 }} {{ 'See this post on [Smarter Ways to Encode Categorical Data](https://towardsdatascience.com/smarter-ways-to-encode-categorical-data-for-machine-learning-part-1-of-3-6dca2f71b159)' | fndetail: 3 }} {{ 'Big *Thank You* to [Chaim Gluck](https://medium.com/@chaimgluck1/working-with-pandas-fixing-messy-column-names-42a54a6659cd) for providing this tip' | fndetail: 4 }}
github_jupyter
``` from IPython.display import Image import torch import torch.nn as nn import torch.nn.functional as F import math, random from scipy.optimize import linear_sum_assignment from utils import NestedTensor, nested_tensor_from_tensor_list, MLP Image(filename="figs/model.png", retina=True) ``` This notebook provides a Pytorch implementation for the sequential variant of PRTR (Pose Regression TRansformers) in [Pose Recognition with Cascade Transformers](https://arxiv.org/abs/2104.06976). It is intended to provide researchers interested in sequential PRTR with a concrete understanding that only code can deliver. It can also be used as a starting point for end-to-end top-down pose estimation research. ``` class PRTR_sequential(nn.Module): def __init__(self, backbone, transformer, transformer_kpt, level, x_res=10, y_res=10): super().__init__() self.backbone = backbone self.transformer = transformer hidden_dim = transformer.d_model self.class_embed = nn.Linear(hidden_dim, 2) self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3) self.query_embed = nn.Embedding(100, hidden_dim) self.input_proj = nn.Conv2d(backbone.num_channels, hidden_dim, kernel_size=1) self.transformer_kpt = transformer_kpt x_interpolate = torch.linspace(-1.25, 1.25, x_res, requires_grad=False).unsqueeze(0) # [1, x_res], ANNOT ?(1) y_interpolate = torch.linspace(-1.25, 1.25, y_res, requires_grad=False).unsqueeze(0) # [1, y_res] self.register_buffer("x_interpolate", x_interpolate) self.register_buffer("y_interpolate", y_interpolate) self.x_res = x_res self.y_res = y_res self.level = level mask = torch.zeros(1, y_res, x_res, requires_grad=False) # [1, y_res, x_res] self.register_buffer("mask", mask) self.build_pe() ``` Class `PRTR_sequential` needs the following arguments: + backbone: a customizable CNN backbone which returns a pyramid of feature maps with different spatial size + transformer: a customizable Transformer for person detection (1st Transformer) + transformer_kpt: a customizable Transformer for keypoint detection (2nd Transformer) + level: from which layers of pyramid we will extract features + x_res: the width of STN-cropped featrure map fed to 2nd Transformer + y_res: the height of STN-cropped featrure map fed to 2nd Transformer Some annotations: 1. For `x_interpolate` and `y_interpolate`, we use an extended eyesight of 125% to the orginal boudning box to provide more information from backbone to the 2nd Transformer. ``` def build_pe(self): # fixed sine pe not_mask = 1 - self.mask y_embed = not_mask.cumsum(1, dtype=torch.float32) x_embed = not_mask.cumsum(2, dtype=torch.float32) eps = 1e-6; scale = 2 * math.pi # normalize? 
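    # (answering the note above: the cumulative positions are normalized to [0, 2*pi]
    # below before applying the sin/cos encoding, following DETR's sine embedding)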
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * scale x_embed = x_embed / (x_embed[:, :, -1:] + eps) * scale num_pos_feats = 128; temperature = 10000 dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=self.mask.device) dim_t = temperature ** (2 * (dim_t // 2) / num_pos_feats) pos_x = x_embed[:, :, :, None] / dim_t pos_y = y_embed[:, :, :, None] / dim_t pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) self.register_buffer("pe", pos) # learnable pe self.row_embed = nn.Embedding(num_pos_feats, self.x_res) self.col_embed = nn.Embedding(num_pos_feats, self.y_res) nn.init.uniform_(self.row_embed.weight) nn.init.uniform_(self.col_embed.weight) def get_leant_pe(self): y_embed = self.col_embed.weight.unsqueeze(-1).expand(-1, -1, self.x_res) x_embed = self.row_embed.weight.unsqueeze(1).expand(-1, self.y_res, -1) embed = torch.cat([y_embed, x_embed], dim=0).unsqueeze(0) return embed PRTR_sequential.build_pe = build_pe PRTR_sequential.get_leant_pe = get_leant_pe ``` Then we build positional embedding for the 2nd Transformer, which ensembles both fixed sinusoidal embedding and learnt embedding. For each box containing person cropped from original image, we use the same positional embedding, irrelevent to where the box is. ``` def forward(self, samples): # the 1st Transformer, to detect person features, pos = self.backbone(samples) hs = self.transformer(self.input_proj(features[-1].tensors), features[-1].mask, self.query_embed.weight, pos[-1])[0][-1] # [B, person per image, f] logits = self.class_embed(hs) # [B, person per image, 2] bboxes = self.bbox_embed(hs).sigmoid() # [B, person per image, 4] outputs = {'pred_logits': logits, 'pred_boxes': bboxes} # some preperation for STN feature cropping person_per_image = hs.size(1) num_person = person_per_image * hs.size(0) heights, widths = samples.get_shape().unbind(-1) # [B] * 2 rh = heights.repeat_interleave(person_per_image) # [person per image * B] rw = widths.repeat_interleave(person_per_image) # [person per image * B] srcs = [features[_].decompose()[0] for _ in self.level] cx, cy, w, h = bboxes.flatten(end_dim=1).unbind(-1) # [person per image * B] * 4 cx, cy, w, h = cx * rw, cy * rh, w * rw, h * rh # ANNOT (1) # STN cropping y_grid = (h.unsqueeze(-1) @ self.y_interpolate + cy.unsqueeze(-1) * 2 - 1).unsqueeze(-1).unsqueeze(-1) # [person per image * B, y_res, 1, 1] x_grid = (w.unsqueeze(-1) @ self.x_interpolate + cx.unsqueeze(-1) * 2 - 1).unsqueeze(-1).unsqueeze(1) # [person per image * B, 1, x_res, 1] grid = torch.cat([x_grid.expand(-1, self.y_res, -1, -1), y_grid.expand(-1, -1, self.x_res, -1)], dim=-1) cropped_feature = [] cropped_pos = [] for j, l in enumerate(self.level): cropped_feature.append(F.grid_sample(srcs[j].expand(num_person, -1, -1, -1), grid, padding_mode="border")) # [person per image * B, C, y_res, x_res] cropped_feature = torch.cat(cropped_feature, dim=1) cropped_pos.append(self.pe.expand(num_person, -1, -1, -1)) cropped_pos.append(self.get_leant_pe().expand(num_person, -1, -1, -1)) cropped_pos = torch.cat(cropped_pos, dim=1) mask = self.mask.bool().expand(num_person, -1, -1) # ANNOT (2) # 2nd Transformer coord, logtis = self.transformer_kpt(bboxes, cropped_feature, cropped_pos, mask) # [person per image * B, 17, 2] outputs["pred_kpt_coord"] = coord.reshape(hs.size(0), -1, self.transformer_kpt.num_queries, 2) 
outputs["pred_kpt_logits"] = logtis.reshape(hs.size(0), -1, self.transformer_kpt.num_queries, self.transformer_kpt.num_kpts + 1) return outputs PRTR_sequential.forward = forward ``` `forward` method takes in a `NestedTensor` and returns a dictionary of predictions, some annotations: 1. Input `samples` and `features` are `NestedTensor`, which basically stacks a list of tensors of different shapes by their top-left corner and uses masks to denote valid positions. Thus when we need to crop person bounding boxes from the whole feature map, we need to scale boxes according to image size 2. we always gives unmasked image to the 2nd Transformer, becasue all the persons are cropped to the same resolution ``` def infer(self, samples): self.eval() outputs = self(samples) out_logits, out_coord = outputs['pred_kpt_logits'], outputs['pred_kpt_coord'] C_stacked = out_logits[..., 1:].transpose(2, 3).flatten(0, 1).detach().cpu().numpy() # [person per image * B, 17, num queries (for keypoint)] out_coord = out_coord.flatten(0, 1) coord_holder = [] for b, C in enumerate(C_stacked): _, query_ind = linear_sum_assignment(-C) coord_holder.append(out_coord[b, query_ind.tolist()]) matched_coord = torch.stack(coord_holder, dim=0).reshape(out_logits.size(0), out_logits.size(1), 17, -1) return matched_coord # [B, num queries, num kpts, 2] PRTR_sequential.infer = infer ``` `infer` takes the same input as `forward`, but instead of returning all keypoint queries for loss calculaiton, it leverages a Hungarian algorithm to select the 17 keytpoints as prediction. The selection process can be thought of as a bipartite graph matching problem, graph constructed as below: + for each query in 2nd Transformer a node is made, creating set Q + for each keypoint type, a node is made, creating set K + set Q and K are fully inter-connected, edge weight between $Q_i$ and $K_j$ are the _unnormalized logits_ of query $i$ classified as keypoint type $k$ + Q, K have no intra-connection, Hungarian algorithm will find the matching between Q and K with highest edge weights, selected queries are returned as prediction. 
A minimal example with only 3 queries and 2 keypoint types is shown below:
![](figs/readout.png)
```
class DETR_kpts(nn.Module):
    def __init__(self, transformer, num_kpts, num_queries, input_dim):
        super().__init__()
        self.num_kpts = num_kpts
        self.num_queries = num_queries
        hidden_dim = transformer.d_model
        self.query_embed = nn.Embedding(num_queries, hidden_dim)
        self.input_proj = nn.Conv2d(input_dim, hidden_dim, kernel_size=1)
        self.transformer = transformer
        self.coord_predictor = MLP(hidden_dim, hidden_dim, 2, num_layers=3)
        self.class_predictor = nn.Linear(hidden_dim, num_kpts + 1)

    def forward(self, bboxes, features, pos, mask):
        src_proj = self.input_proj(features)
        j_embed = self.transformer(src_proj, mask, self.query_embed.weight, pos)[0][-1]  # [B, num queries, hidden dim]
        j_coord_ = self.coord_predictor(j_embed).sigmoid()
        x, y = j_coord_.unbind(-1)  # [B, Q] * 2
        x = (x * 1.25 - 0.625) * bboxes[:, 2].unsqueeze(-1) + bboxes[:, 0].unsqueeze(-1)
        y = (y * 1.25 - 0.625) * bboxes[:, 3].unsqueeze(-1) + bboxes[:, 1].unsqueeze(-1)
        x = x.clamp(0, 1)
        y = y.clamp(0, 1)
        j_coord = torch.stack([x, y], dim=-1)
        j_class = self.class_predictor(j_embed[-1])  # [B, J, c+1], logits
        return j_coord, j_class
```
Class `DETR_kpts` is the 2nd Transformer in PRTR and needs the following arguments:
+ transformer: a customizable Transformer for keypoint detection (2nd Transformer)
+ num_kpts: number of keypoint annotations per person in this dataset, e.g., COCO has 17 keypoints
+ num_queries: query number, similar to DETR
+ input_dim: image feature dimension from the 1st Transformer

Its `forward` takes in `bboxes`, because we need to recover per-person predictions to whole-image coordinates, along with `features`, `pos` and `mask` as Transformer inputs. `forward` returns the predicted keypoint coordinates in [0, 1], relative to the whole image, and their probability of belonging to each keypoint class, e.g. nose, left shoulder.
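To make the readout in `infer` above concrete, here is a tiny self-contained sketch of the Hungarian matching step with made-up logits for 3 queries and 2 keypoint types (mirroring the figure); it illustrates only the matching, not the model itself:

```
import numpy as np
from scipy.optimize import linear_sum_assignment

# Rows = keypoint types, columns = queries (same layout as C inside infer):
# unnormalized logits of each query being classified as each keypoint type.
C = np.array([[2.0, 0.1, 0.5],   # keypoint type 0, e.g. nose
              [0.3, 1.5, 0.2]])  # keypoint type 1, e.g. left shoulder

# linear_sum_assignment minimizes total cost, so negate to maximize total logits
kpt_ind, query_ind = linear_sum_assignment(-C)
print(query_ind)  # [0 1]: query 0 is read out as type 0, query 1 as type 1, query 2 is unused
```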
github_jupyter
## Baltic test case configuration Diagnostics output to close heat, salt, thickness budgets, and derive watermass transformation. This notebook is a working space to explore that output. ``` import xarray as xr import numpy as np from xhistogram.xarray import histogram ### Data loading, grabbed from MOM6-analysis cookbook # Load data on native grid rootdir = '/archive/gam/MOM6-examples/ice_ocean_SIS2/Baltic_OM4_025/tutorial_wmt/' gridname = 'natv' prefix = '19000101.ocean_' # Diagnostics were saved into different files suffixs = ['thck','heat','salt','surf','xtra'] ds = xr.Dataset() for suffix in suffixs: filename = prefix+gridname+'_'+suffix+'*.nc' dsnow = xr.open_mfdataset(rootdir+filename) ds = xr.merge([ds,dsnow]) gridname = '19000101.ocean_static.nc' grid = xr.open_dataset(rootdir+gridname).squeeze() # Specify constants for the reference density and the specific heat capacity rho0 = 1035. Cp = 3992. # Specify the diffusive tendency terms processes=['boundary forcing','vertical diffusion','neutral diffusion', 'frazil ice','internal heat'] terms = {} terms['heat'] = {'boundary forcing':'boundary_forcing_heat_tendency', 'vertical diffusion':'opottempdiff', 'neutral diffusion':'opottemppmdiff', 'frazil ice':'frazil_heat_tendency', 'internal heat':'internal_heat_heat_tendency'} terms['salt'] = {'boundary forcing':'boundary_forcing_salt_tendency', 'vertical diffusion':'osaltdiff', 'neutral diffusion':'osaltpmdiff', 'frazil ice':None, 'internal heat':None} terms['thck'] = {'boundary forcing':'boundary_forcing_h_tendency', 'vertical diffusion':None, 'neutral diffusion':None, 'frazil ice':None, 'internal heat':None} colors = {'boundary forcing':'tab:blue', 'vertical diffusion':'tab:orange', 'neutral diffusion':'tab:green', 'frazil ice':'tab:red', 'internal heat':'tab:purple'} ``` *** 11/11/20 gmac In equating the content tendency output by the model with the tendency of the materially conserved tracer (e.g. heat tendency and temperature), I think I am making an error by not accomodating changes in thickness. The product rule shows clearly that $h\dot{\lambda} \neq \dot{(h\lambda)}$, and it is the LHS that we wish to have in the WMT expression. Here, try applying a correction for $\lambda\dot{h}$. *[But, look again carefully at MOM5_elements, Eq. 36.87, equates the two. There is no thickness rate of change on the LHS. This is true due to continuity, **except** in the presence of a surface volume flux. 
This is what is then explored in Section 36.8.6.]* ``` G_prior = xr.Dataset() G = xr.Dataset() budget = 'salt' # Specify the tracer, its range and bin widths (\delta\lambda) for the calculation if budget == 'heat': tracer = ds['temp'] delta_l = 0.2 lmin = -2 lmax = 10 elif budget == 'salt': tracer = ds['salt'] delta_l = 0.2 lmin = 2 lmax = 36 bins = np.arange(lmin,lmax,delta_l) for process in processes: term = terms[budget][process] if term is not None: nanmask = np.isnan(ds[term]) tendency = ds[term] if budget == 'heat': tendency /= Cp*rho0 # Calculate G prior to thickness correction G_prior[process] = histogram(tracer.where(~nanmask).squeeze(), bins=[bins], dim=['xh','yh','zl'], weights=( rho0*(tendency )*grid['areacello'] ).where(~nanmask).squeeze() )/np.diff(bins) # Accomodate thickness changes if nonzero term_thck = terms['thck'][process] if term_thck is not None: tendency -= tracer*ds[term_thck] G[process] = histogram(tracer.where(~nanmask).squeeze(), bins=[bins], dim=['xh','yh','zl'], weights=( rho0*(tendency )*grid['areacello'] ).where(~nanmask).squeeze() )/np.diff(bins) for process in G.data_vars: G_prior[process].plot(label=process,color=colors[process],linestyle=':') G[process].plot(label=process,color=colors[process]) ```
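For reference, the thickness correction applied in the loop above simply restates the product rule from the note, in the same notation:

$$\dot{(h\lambda)} = h\dot{\lambda} + \lambda\dot{h} \quad\Rightarrow\quad h\dot{\lambda} = \dot{(h\lambda)} - \lambda\dot{h},$$

which is why, for processes with a nonzero thickness tendency, the code subtracts `tracer * ds[term_thck]` from the content tendency before binning.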
github_jupyter
``` from pathlib import Path import numpy as np import pandas as pd import swifter import cleantext pd.options.display.max_colwidth = 1000 OUT = Path('~/data/ynacc_proc/replicate/threads_last') BASE_PATH = Path('/mnt/data/datasets/ydata-ynacc-v1_0') ANN1 = BASE_PATH/'ydata-ynacc-v1_0_expert_annotations.tsv' ANN2 = BASE_PATH/'ydata-ynacc-v1_0_turk_annotations.tsv' UNL = BASE_PATH/'ydata-ynacc-v1_0_unlabeled_conversations.tsv' TRAIN_IDS = BASE_PATH/'ydata-ynacc-v1_0_train-ids.txt' trainids = pd.read_csv(TRAIN_IDS, header=None) df_an1 = pd.read_table(ANN1) df_an1 = df_an1[df_an1['sdid'].isin(list(trainids[0]))] df_an1 = df_an1[['sdid', 'text', 'commentindex']] df_an1 = df_an1.drop_duplicates() df_an1 df_an2 = pd.read_table(ANN2) df_an2 = df_an2[df_an2['sdid'].isin(list(trainids[0]))] df_an2 = df_an2[['sdid', 'text', 'commentindex']] df_an2 = df_an2.drop_duplicates() df_an2 df_notan = pd.read_csv(UNL, engine='python', sep='\t', quoting=3, error_bad_lines=False) df_notan = df_notan[['sdid', 'text', 'commentindex']] # not needed anmoyre # df['text'] = df.apply(lambda x: 'xx_root_comment ' + x['text'] if pd.isnull(x['parentid']) else x['text'], axis=1) # df['parentid'] = df.apply(lambda x: x['commentid'] if pd.isnull(x['parentid']) else x['parentid'], axis=1) df = pd.concat([df_an1, df_an2, df_notan]) # clean up df = df.dropna(subset=['text']) df["commentindex"] = pd.to_numeric(df["commentindex"]) df df['text'] = df['text'].swifter.apply(lambda x: cleantext.clean(x, lower=False, no_urls=True, no_emails=True, zero_digits=True)) df = df.drop_duplicates() # get list of all comennts per thread res = df.sort_values(by=['commentindex']).groupby('sdid').agg({'text': lambda x: list(x)}).reset_index() res # create all possible thread combinations new_items = [] def create_threads(row): for i in range(1, len(row['text']) + 1): x = row['text'][:i] new = 'xx_thread_start ' + ' '.join([ 'xx_comment_start ' + (' xx_last ' + xx if xx == list(x)[-1] else xx) + ' xx_comment_end' for xx in list(x)]) + ' xx_thread_end' new_items.append({'text': new, 'sdid': row['sdid']}) for _, row in res.iterrows(): create_threads(row) final = pd.DataFrame(new_items) final # final['text'] = final['text'].swifter.apply(lambda x: clean(x, lower=False)) final.groupby('sdid').count() final.shape split_id = 130000 final["sdid"] = pd.to_numeric(final["sdid"]) train = final[final['sdid'] <= split_id][['text']] val = final[final['sdid'] > split_id][['text']] train val Path('/home/group7/data/ynacc_proc/replicate/threads_last').mkdir(exist_ok=True) ! ls /home/group7/data/ynacc_proc/replicate train.to_csv(OUT/'train.csv', index=False) val.to_csv(OUT/'val.csv', index=False) ```
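To make the `xx_` marker format produced by `create_threads` concrete, here is a toy run on two invented comments (not from the Yahoo data); it only appends a couple of illustration rows to `new_items`:

```
toy_row = {'sdid': 0, 'text': ['first comment', 'a reply']}
create_threads(toy_row)
[item['text'] for item in new_items[-2:]]
# ['xx_thread_start xx_comment_start  xx_last first comment xx_comment_end xx_thread_end',
#  'xx_thread_start xx_comment_start first comment xx_comment_end xx_comment_start  xx_last a reply xx_comment_end xx_thread_end']
```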
github_jupyter
``` import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import gc import json import math import cv2 import PIL from PIL import Image import seaborn as sns sns.set(style='darkgrid') from sklearn.preprocessing import LabelEncoder from keras.utils import to_categorical from keras import layers from keras.applications import ResNet50,MobileNet, DenseNet201, InceptionV3, NASNetLarge, InceptionResNetV2, NASNetMobile from keras.callbacks import Callback, ModelCheckpoint, ReduceLROnPlateau, TensorBoard from keras.preprocessing.image import ImageDataGenerator from keras.utils.np_utils import to_categorical from keras.models import Sequential from keras.optimizers import Adam import matplotlib.pyplot as plt import pandas as pd from sklearn.model_selection import train_test_split from sklearn.metrics import cohen_kappa_score, accuracy_score import scipy from tqdm import tqdm import tensorflow as tf from keras import backend as K import gc from functools import partial from sklearn import metrics from collections import Counter import json import itertools import matplotlib.pyplot as plt from sklearn.model_selection import KFold from sklearn.preprocessing import OneHotEncoder from tqdm import tqdm from sklearn.decomposition import PCA %matplotlib inline sub = pd.read_csv('/kaggle/input/siim-isic-melanoma-classification/sample_submission.csv') import os print(os.listdir("../input/siim-isic-melanoma-classification")) #Loading Train and Test Data train = pd.read_csv("../input/siim-isic-melanoma-classification/train.csv") test = pd.read_csv("../input/siim-isic-melanoma-classification/test.csv") print("{} images in train set.".format(train.shape[0])) print("{} images in test set.".format(test.shape[0])) train.head() test.head() ``` Let's look at the distribution of teh target: ``` np.mean(train.target) ``` So this is a binary classification problem with highly imbalanced data. Let's take a look at a few images. 
``` plt.figure(figsize=(10,5)) sns.countplot(x='target', data=train, order=list(train['target'].value_counts().sort_index().index) , color='cyan') train['target'].value_counts() train.columns z=train.groupby(['target','sex'])['benign_malignant'].count().to_frame().reset_index() z.style.background_gradient(cmap='Reds') sns.catplot(x='target',y='benign_malignant', hue='sex',data=z,kind='bar') from keras.models import Sequential from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Convolution2D,Conv2D from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D from keras.optimizers import SGD from keras.callbacks import TensorBoard from keras import applications ``` **TRAINING** ``` import time start=time.time() train_images = np.load('../input/rgb-3500-96/train_images_rgb_3500_96.npy') end=time.time() print(f"\nTime to load train images: {round(end-start,5)} seconds.") print('Train_images shape: ',train_images.shape) start=time.time() test_images = np.load('../input/test-images-rgb-10000-96/test_images_rbg_10000_96.npy') end=time.time() print(f"\nTime to load test images: {round(end-start,5)} seconds.") print('Test_images shape: ',test_images.shape) #target data train_labels =np.load('../input/rgb-3500-96/train_labels_rgb_3500_96.npy') print('Train_labels shape: ',train_labels.shape) #spliting train data from sklearn.model_selection import train_test_split x_train,x_val,y_train,y_val=train_test_split(train_images,train_labels,test_size=0.3) print('x_train shape: ',x_train.shape) print('x_val shape: ',x_val.shape) ``` **DATA AUGMENTATION** ``` augs = ImageDataGenerator( featurewise_center=True, featurewise_std_normalization=True, rotation_range=20, width_shift_range=0.2, height_shift_range=0.2, horizontal_flip=True) augs.fit(x_train) ``` **MODELLING** ``` #VGG-16 MODEL NO. 
1 from keras.applications.vgg16 import VGG16 model = Sequential() model.add(ZeroPadding2D((1,1),input_shape=(32,32,3))) model.add(Convolution2D(64, 3, 3, activation='relu')) model.add(ZeroPadding2D((1,1))) model.add(Convolution2D(64, 3, 3, activation='relu')) model.add(MaxPooling2D((2,2), strides=(2,2))) model.add(ZeroPadding2D((1,1))) model.add(Convolution2D(128, 3, 3, activation='relu')) model.add(ZeroPadding2D((1,1))) model.add(Convolution2D(128, 3, 3, activation='relu')) model.add(MaxPooling2D((2,2), strides=(2,2))) model.add(ZeroPadding2D((1,1))) model.add(Convolution2D(256, 3, 3, activation='relu')) model.add(ZeroPadding2D((1,1))) model.add(Convolution2D(256, 3, 3, activation='relu')) model.add(ZeroPadding2D((1,1))) model.add(Convolution2D(256, 3, 3, activation='relu')) model.add(MaxPooling2D((2,2), strides=(2,2))) model.add(ZeroPadding2D((1,1))) model.add(Convolution2D(512, 3, 3, activation='relu')) model.add(ZeroPadding2D((1,1))) model.add(Convolution2D(512, 3, 3, activation='relu')) model.add(ZeroPadding2D((1,1))) model.add(Convolution2D(512, 3, 3, activation='relu')) model.add(MaxPooling2D((2,2), strides=(2,2))) model.add(ZeroPadding2D((1,1))) model.add(Convolution2D(512, 3, 3, activation='relu')) model.add(ZeroPadding2D((1,1))) model.add(Convolution2D(512, 3, 3, activation='relu')) model.add(ZeroPadding2D((1,1))) model.add(Convolution2D(512, 3, 3, activation='relu')) model.add(MaxPooling2D((2,2), strides=(2,2))) model.add(Flatten()) model.add(Dense(4096, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(4096, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(1, activation='sigmoid')) model.summary() model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy']) #XCEPTION MODEL NO. 2 from keras.layers import Dropout, DepthwiseConv2D, MaxPooling2D, concatenate from keras.models import Model inp = Input(shape = (96,96, 3)) x = inp x = Conv2D(32, (3, 3), strides = 2, padding = "same", activation = "relu")(x) x = BatchNormalization(axis = 3)(x) x = Dropout(0.4)(x) x = Conv2D(64, (3, 3), strides = 1, padding = "same", activation = "relu")(x) x = BatchNormalization(axis = 3)(x) x = Dropout(0.4)(x) x1 = DepthwiseConv2D((3, 3), (1, 1), padding = "same", activation = "relu")(x) x = BatchNormalization(axis = 3)(x) x = Dropout(0.4)(x) x1 = DepthwiseConv2D((3, 3), (1, 1), padding = "same", activation = "relu")(x1) x = BatchNormalization(axis = 3)(x) x = Dropout(0.4)(x) x1 = MaxPooling2D((2, 2), strides = 1)(x1) x = concatenate([x1, Conv2D(64, (2, 2), strides = 1)(x)]) x1 = Activation("relu")(x) x1 = Conv2D(256, (3, 3), strides = 1, padding = "same", activation = "relu")(x1) x = BatchNormalization(axis = 3)(x) x = Dropout(0.4)(x) x1 = DepthwiseConv2D((3, 3), strides = 1, padding = "same", activation = "relu")(x1) x = BatchNormalization(axis = 3)(x) x = Dropout(0.4)(x) x1 = DepthwiseConv2D((3, 3), strides = 1, padding = "same")(x1) x = BatchNormalization(axis = 3)(x) x = Dropout(0.4)(x) x1 = MaxPooling2D((2, 2), strides = 1)(x1) x = concatenate([x1, Conv2D(256, (2, 2), strides = 1)(x)]) x = Activation("relu")(x) x = Conv2D(256, (3, 3), strides = 1, padding = "same", activation = "relu")(x) x = BatchNormalization(axis = 3)(x) x = Dropout(0.4)(x) x = Conv2D(128, (3, 3), strides = 1, padding = "same", activation = "relu")(x) x = BatchNormalization(axis = 3)(x) x = Dropout(0.4)(x) x = Flatten()(x) x = Dense(1, activation = "sigmoid")(x) model2 = Model(inp, x) model2.compile(optimizer = "adam", loss = "binary_crossentropy", metrics = ["accuracy"]) 
model2.summary() #DENSENET MODEL NO. 3 from tensorflow.keras.applications import DenseNet201 import tensorflow.keras.layers as L dnet201 = DenseNet201( input_shape=(96,96, 3), include_top=False ) dnet201.trainable = True model3 = tf.keras.Sequential([ dnet201, L.GlobalAveragePooling2D(), L.Dense(1, activation='sigmoid') ]) model3.compile( optimizer='adam', loss = 'binary_crossentropy', metrics=['accuracy'] ) model3.summary() batch_size=128 epochs=30 history = model.fit(x_train, y_train, batch_size=batch_size, nb_epoch=epochs, verbose=1, validation_data=(x_val,y_val)) batch_size=128 epochs=15 history2 = model2.fit(x_train, y_train, batch_size=batch_size, nb_epoch=epochs, verbose=1, validation_data=(x_val,y_val)) batch_size=128 epochs=30 history3 = model3.fit(x_train, y_train, batch_size=batch_size, nb_epoch=epochs, verbose=1, validation_data=(x_val,y_val)) model.save("vgg16.h5") model2.save("xception.h5") model3.save("densenet.h5") ``` **EVALUATION** ``` scores = model.evaluate(x_val, y_val, verbose=0) print('Test loss:', scores[0]) print('Test accuracy:', scores[1]) scores = model2.evaluate(x_val, y_val, verbose=0) print('Test loss:', scores[0]) print('Test accuracy:', scores[1]) scores = model3.evaluate(x_val, y_val, verbose=0) print('Test loss_3:', scores[0]) print('Test accuracy_3:', scores[1]) ``` **PREDICTION** ``` y_test_prob = model.predict(test_images) pred_df = pd.DataFrame({'image_name': test['image_name'], 'target': np.concatenate(y_test_prob)}) pred_df.to_csv('submission_vgg.csv',header=True, index=False) pred_df.head(10) y_test_prob2 = model2.predict(test_images) pred_df2 = pd.DataFrame({'image_name': test['image_name'], 'target': np.concatenate(y_test_prob2)}) pred_df2.to_csv('submission_xception.csv',header=True, index=False) pred_df2.head(10) y_test_prob3 = model3.predict(test_images) pred_df3 = pd.DataFrame({'image_name': test['image_name'], 'target': np.concatenate(y_test_prob3)}) pred_df3.to_csv('submission_dense.csv',header=True, index=False) pred_df3.head(10) ``` **ENSEMBLE** ``` en = pd.DataFrame({'image_name':test['image_name'], 'target':(0.3*pred_df['target'] + 0.3*pred_df2['target'] + 0.3*pred_df3['target'])}) en.to_csv('ensemble1.csv',header=True, index=False) en.head(10) ```
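Since the target is highly imbalanced (as seen earlier), plain accuracy can look deceptively good. A quick sketch for also checking validation ROC AUC, assuming the three trained models and the `x_val`/`y_val` split are still in memory:

```
from sklearn.metrics import roc_auc_score

for name, m in [('vgg16', model), ('xception', model2), ('densenet', model3)]:
    val_prob = m.predict(x_val).ravel()  # sigmoid outputs, one probability per image
    print(name, 'validation AUC: %.4f' % roc_auc_score(y_val.ravel(), val_prob))
```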
github_jupyter
# Intro to reimbursements: overview with visualization This notebook provides an overview of the `2017-03-15-reimbursements.xz` dataset, which contains broad data regarding CEAP usage in all terms since 2009. It aims to provide an example of basic analyses and visualization by exploring topics such as: - Average monthly spending per congressperson along the years - Seasonality in reimbursements - Reimbursements by type of spending - Which party has the most spending congressmen? - Which state has the most spending congressmen? - Who were the most hired suppliers by amount paid? - Which were the most expensive individual reimbursements? Questions are not explicitly answered. Charts and tables are provided for free interpretation, some of them with brief commentaries from the author. **Obs**.: original analysis was made considering data from 2009 to 2017 (mainly until 2016). One might want to filter by terms (e.g. 2010-2014) to make more realistic comparisons (spenditures by state, party, congressperson, etc.). Code cell #4 provides an example of how it could be done. --- ``` import pandas as pd import numpy as np import matplotlib import matplotlib.pyplot as plt from pylab import rcParams %matplotlib inline # Charts styling plt.style.use('ggplot') rcParams['figure.figsize'] = 15, 8 matplotlib.rcParams.update({'font.size': 14}) #rcParams['font.family'] = 'Georgia' # Type setting for specific columns #DTYPE = dict(cnpj=np.str, cnpj_cpf=np.str, ano=np.int16, term=np.str) # Experimenting with 'category' type to reduce df size DTYPE =dict(cnpj_cpf=np.str,\ year=np.int16,\ month=np.int16,\ installment='category',\ term_id='category',\ term='category',\ document_type='category',\ subquota_group_id='category',\ subquota_group_description='category',\ #subquota_description='category',\ subquota_number='category',\ state='category',\ party='category') reimbursements = pd.read_csv('../data/2017-03-15-reimbursements.xz', \ dtype=DTYPE, low_memory=False, parse_dates=['issue_date']) # Creates a DataFrame copy with fewer columns r = reimbursements[['year', 'month', 'total_net_value', 'party', 'state', 'term', 'issue_date',\ 'congressperson_name', 'subquota_description','supplier', 'cnpj_cpf']] r.head() ``` ## Filters depending on the scope of analysis Here, filters by state, party, years, etc. can be applied. Obs.: chart commentaries provided might not remain valid depending on filters chosen. ``` # Filters only most recent years (from 2015) #r = r[(r.year == 2015) | (r.year == 2016) | (r.year == 2017)] #r.head() ``` ## Questions & answers ### Evolution of average monthly spending along the years Are congressmen spending more today in relation to past years? #### How many congressmen in each year? ``` years = r.year.unique() # Computes unique names in each year and saves into a pd.Series d = dict() for y in years: d[y] = r[r.year == y].congressperson_name.nunique() s = pd.Series(d) s s.plot(kind='bar') plt.title('Qtdy of congressmen listed per year') ``` ##### Commentary Greater number of congressmen in 2011 and 2015 is due to term transitions which occur during the year. --- #### How much did they spend, in average, per month in each year? This analysis takes into consideration the following elements: - Main data: - Monthly average spending per congressman during each year - Relevant aspects for trend comparison: - CEAP limit for each year (i.e. the maximum allowed quota increased during the years) - Inflation indexes (i.e. 
prices of goods raised during the years) ##### Evolution of inflation (IPCA) ``` # Source: http://www.ibge.gov.br/home/estatistica/indicadores/precos/inpc_ipca/defaultseriesHist.shtm ipca_years = [2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016] ipca_indexes = [0.0431, 0.0590, 0.0650, 0.0583, 0.0591, 0.0641, 0.1067, 0.0629] ipca = pd.DataFrame({ 'year': ipca_years, 'ipca': ipca_indexes }) # Filters only by years in dataset ipca = ipca[ipca['year'].isin(r.year.unique())].set_index('year') ipca.head() ``` ##### Maximum quota allowed (CEAP limits) There is information available for maximum CEAP for 2009 and 2017. Therefore, a simple compound growth rate (CAGR) is calculated from 2009 to 2017. Values for years in between are assumed to be a linear composition of the growth rate. ``` states = ['AC', 'AL', 'AM', 'AP', 'BA', 'CE', 'DF', 'ES', 'GO', 'MA', 'MG', 'MS', 'MT', 'PA', 'PB', 'PE', 'PI', 'PR', 'RJ', 'RN', 'RO', 'RR', 'RS', 'SC', 'SE', 'SP', 'TO'] # Source: http://www2.camara.leg.br/a-camara/estruturaadm/diretorias/dirgeral/estrutura-1/deapa/portal-da-posse/ceap-1 ceap_2009 = [40711.32, 37318.73, 39734.17, 39554.50, 35540.51, 38705.50, 27977.66, 34080.83, 32317.69, 38429.49, 32856.38, 36949.65, 35924.24, 38499.17, 38319.91, 37992.68, 37344.18, 35412.67, 32550.32, 38963.25, 39828.33, 41612.80, 37256.00, 36337.92, 36578.43, 33730.95, 35993.76] # Source: http://www2.camara.leg.br/comunicacao/assessoria-de-imprensa/cota-parlamentar ceap_2017 = [44632.46, 40944.10, 43570.12, 43374.78, 39010.85, 42451.77, 30788.66, 37423.91, 35507.06, 42151.69, 36092.71, 40542.84, 39428.03, 42227.45, 42032.56, 41676.80, 40971.77, 38871.86, 35759.97, 42731.99, 43672.49, 45612.53, 40875.90, 39877.78, 40139.26, 37043.53, 39503.61] ceap_limit_states = pd.DataFrame({ 'ceap_2009': ceap_2009, 'ceap_2017': ceap_2017 }, index=states) ceap_limit_states.head() all_years = ipca_years # Calculates CAGR according to data available (CEAP@2009 and CEAP@2017), using the CEAP average among states cagr = ((ceap_limit_states.ceap_2017.mean() / ceap_limit_states.ceap_2009.mean())**(1./(2017-2009)) - 1) # Computes estimated CEAP values for years in between 2009 and 2017 using CAGR ceap_values = [] for i in range(2017-2009): if i == 0: ceap_values.append(ceap_limit_states.ceap_2009.mean()) elif i == (r.year.nunique() - 1): ceap_values.append(ceap_limit_states.ceap_2017.mean()) else: ceap_values.append(ceap_values[i-1] * (1 + cagr)) # Creates df with all years ceap_limit_years = pd.DataFrame({ 'year': all_years, 'max_avg_ceap': ceap_values }) # Filters only by years in dataset ceap_limit_years = ceap_limit_years[ceap_limit_years['year'].isin(r.year.unique())].set_index('year') ceap_limit_years.head() # Groups by name summing up spendings a = r.groupby(['year']).sum().drop('month', 1) a['congressmen_qty'] = s a['avg_monthly_value_per_congressmen'] = a['total_net_value'] / a['congressmen_qty'] / 12 a = a.drop(2017, 0) # Neglets 2017 # Adds columns for CEAP limits and IPCA indexes a['max_avg_ceap'] = ceap_limit_years['max_avg_ceap'] a['pct_of_quota_used'] = (a['avg_monthly_value_per_congressmen'] / a['max_avg_ceap']) * 100 a['ipca'] = ipca['ipca'] a['acc_ipca'] = (a['ipca'] + 1).cumprod() - 1 a # Procedure to handle secondary Y axis fig0, ax0 = plt.subplots() ax1 = ax0.twinx() y0 = a[['avg_monthly_value_per_congressmen', 'max_avg_ceap']].plot(kind='line', ax=ax0)#, label='Itens vendidos') y1 = (a['acc_ipca']*100).plot(kind='line', secondary_y=False, style='g--', ax=ax1)#, label='Preço unitário') y0.legend(loc=2) # bar legend to the left 
y1.legend(loc=1) # line legend to the right y0.set_ylim((0,50000)) #y1.set_ylim((0,50000)) y0.set_ylabel('CEAP usage and limit (R$)') y1.set_ylabel('Accumulated IPCA index (%)') plt.title('Avg. monthly congressmen spending vs. maximum quota and inflation idx.') plt.show() plt.close() ``` ##### Commentary Although average spending has increased along the years, it can be due to both aspects considered: raises in prices and expanded limit for reimbursements. The next chart shows how spending has increased with respect to quota limits. ``` a.pct_of_quota_used.plot() plt.ylim((0,100)) plt.title('Fluctuation of monthly CEAP spending per congressperson (% of max. quota)') ``` ##### Commentary The chart shows that average spending has increased more than quota limits were raised (from ca. 40% to 60% of quota usage). This might be due to the steep rise in inflation levels, as observed in the previous chart. --- ### Average monthly spending per congressperson along the years This table shows the data above detailed per congressperson. ``` # Groups by name summing up spendings a = r.groupby(['congressperson_name', 'year'])\ .sum()\ .drop('month', 1) # Computes average spending per month and unstacks a['monthly_total_net_value'] = a['total_net_value'] / 12 a = a.drop('total_net_value', 1).unstack() # Creates subtotal column to the right a['mean'] = a.mean(axis=1) a.head() ``` ### Seasonality in reimbursements Out of curiosity,in which period of the year more reimbursements were issued? ``` r.groupby('month')\ .sum()\ .total_net_value\ .sort_index()\ .plot(kind='bar', rot=0) plt.title('Fluctuation of reimbursements issued by months (R$)') ``` ### Reimbursements by type of spending For what are congressmen most using their quota? ``` r.groupby('subquota_description')\ .sum()\ .total_net_value\ .sort_values(ascending=True)\ .plot(kind='barh') plt.title('Total spent by type of service (R$)') ``` ##### Commentary This chart makes it clear what is prioritized by congressmen: publicity of their activity. Voters might judge whether this choice is reasonable or not. --- ### Which party has the most spending congressmen? ##### How many congressmen in each party? ``` parties = r.party.unique() parties # Computes unique names in each state and saves into a pd.Series d = dict() for p in parties: d[p] = r[r.party == p].congressperson_name.nunique() s = pd.Series(d) s ``` #### How much did congressmen from each party spend in the year, in average? ``` t = r.groupby('party').sum() t = t.drop(['year', 'month'], 1) # Removes useless columns t['congressmen_per_party'] = s years = r.year.nunique() t['monthly_value_per_congressperson'] = t['total_net_value'] / t['congressmen_per_party'] / (12*years) t.sort_values(by='monthly_value_per_congressperson', ascending=False).head() t.monthly_value_per_congressperson\ .sort_values(ascending=False)\ .plot(kind='bar') plt.title('Average monthly reimbursements per congressperson by party (R$)') ``` ##### Commentary It is important to note that many congressmen change parties frequently. Therefore, anyone interested in drawing conclusions regarding parties might want to analyse the data in further detail than it is presented here. --- ### Which state has the most spending congressmen? ##### How many congressmen in each state? 
``` states = r.state.unique() states # Computes unique names in each state and saves into a pd.Series d = dict() for s in states: d[s] = r[r.state == s].congressperson_name.nunique() s = pd.Series(d) s ``` #### How much did congressmen from each state spend in the year, on average? ##### (!) Important: CEAP maximum value differs among states As commented previously, CEAP max. quota varies among states, according to: http://www2.camara.leg.br/comunicacao/assessoria-de-imprensa/cota-parlamentar ``` # CEAP maximum values from 2017 ceap_states = ceap_limit_states.drop('ceap_2009',1) ceap_states.columns = ['monthly_max_ceap'] # Renames column to be compatible with the code below ceap_states.head() t = r.groupby('state').sum() t = t.drop(['year', 'month'], 1) # Removes useless columns t['congressmen_per_state'] = s t['monthly_max_ceap'] = ceap_states years = r.year.nunique() t['monthly_value_per_congressperson'] = t['total_net_value'] / t['congressmen_per_state'] / (12*years) t['ceap_usage'] = (t['monthly_value_per_congressperson'] / t['monthly_max_ceap']) * 100 t.sort_values(by='ceap_usage', ascending=False).head() t.ceap_usage\ .sort_values(ascending=False)\ .plot(kind='bar', rot=0) plt.title('Average monthly CEAP usage per congressperson by state (% of max. quota)') ``` #### Comparison between a given state and the country's average ``` t.head() country_average = t.ceap_usage.mean() country_average # Parametrizes single-state analysis state = 'SP' state_average = t.loc[state].ceap_usage state_average s = pd.Series() s['average_all_states'] = country_average s[state] = state_average s s.plot(kind='bar', rot=0) plt.title('Average monthly CEAP usage per congressperson: ' + state + ' vs. rest of the country (% of max. quota)') ``` ### Who were the top spenders of all time in absolute terms? ``` r.groupby('congressperson_name')\ .sum()\ .total_net_value\ .sort_values(ascending=False)\ .head(10) r.groupby('congressperson_name')\ .sum()\ .total_net_value\ .sort_values(ascending=False)\ .head(30)\ .plot(kind='bar') plt.title('Total reimbursements issued per congressperson (all years)') ``` ##### Commentary Because the dataset comprises 2009-2017, it might not be reasonable to draw hard conclusions by looking at this chart alone. Some congressmen might have been elected for longer periods, which would be reflected in higher reimbursement totals. For a more detailed - hence more coherent - analysis, one might want to make this comparison for each term (e.g. 2010-2014). That would better identify "top spenders" by comparing congressmen's spending over the same time period. Another interesting analysis can be made by expanding the chart to all congressmen, not only the top 30. This gives a richer look at how far the top spenders stand out from the rest. To do that, just change the `.head(30)` argument in the previous cell. --- ### Who were the most hired suppliers by amount paid? This analysis identifies suppliers by their unique CNPJ. It is worth noting that some telecom carriers commonly use different CNPJs for their subsidiaries in different states (e.g. TIM SP, TIM Sul, etc.). 
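As an aside, a 14-digit CNPJ carries the company identifier in its first 8 digits (the remaining digits encode the branch and two check digits), so one possible refinement, not applied in the cells below, is to merge state subsidiaries of the same carrier by grouping on the CNPJ root. The sketch below assumes the `r` DataFrame defined earlier; the `cnpj_root` column is introduced here only for illustration.

```
# Possible refinement (not used below): aggregate suppliers by CNPJ root.
# In a 14-digit CNPJ the first 8 digits identify the company; the rest identify
# the branch plus two check digits, so grouping on the root merges subsidiaries
# such as "TIM SP" and "TIM Sul" into one company.
r_cnpj = r.dropna(subset=['cnpj_cpf'])
r_cnpj = r_cnpj[r_cnpj['cnpj_cpf'].str.len() == 14]   # keep CNPJ rows only (CPFs have 11 digits)
r_cnpj = r_cnpj.assign(cnpj_root=r_cnpj['cnpj_cpf'].str[:8])
r_cnpj.groupby('cnpj_root')\
      .total_net_value\
      .sum()\
      .sort_values(ascending=False)\
      .head(10)
```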
``` sp = r.groupby(['cnpj_cpf', 'supplier', 'subquota_description'])\ .sum()\ .drop(['year', 'month'], 1)\ .sort_values(by='total_net_value', ascending=False) sp.reset_index(inplace=True) sp = sp.set_index('cnpj_cpf') sp.head() cnpj = r.groupby('cnpj_cpf')\ .sum()\ .drop(['year', 'month'], 1)\ .sort_values(by='total_net_value', ascending=False) cnpj.head() # Adds supplier name besides total_net_value in cnpj df cnpj['supplier'] = '' # Creates empty column cnpj = cnpj.head(1000) # Gets only first 1000 for this analysis # Looks up for supplier names in sp df and fills cnpj df (it might take a while to compute...) for i in range(len(cnpj)): try: cnpj.set_value(cnpj.index[i], 'supplier', sp.loc[cnpj.index[i]].supplier.iloc[0]) except: cnpj.set_value(cnpj.index[i], 'supplier', sp.loc[cnpj.index[i]].supplier) cnpj.head(10) # Fixes better indexing to plot in a copy sp2 = cnpj.set_index('supplier') sp2.head(30)\ .plot(kind='bar') plt.title('Most hired suppliers (unique CNPJ) by total amount paid (R$)') ``` ##### Commentary In general, telecom carries were the suppliers with higher concentration of reimbursements. It is worth noting, however, that Telecommunication subquota accounts for only 8% of the reimbursents. This might suggest a 'long tail' pattern for other subquota types such as publicity, which accounts for 28% of all reimbursements. Another aspect worth noting is the fact that some individual suppliers ("pessoas físicas") appear as top 15 suppliers (e.g. Mr. Douglas da Silva and Mrs. Joceli do Nascimento). One might wonder if such concentration of reimbursements for single-person suppliers is reasonable. ``` pct_telecom = r[r['subquota_description'] == 'Telecommunication'].total_net_value.sum() / r.total_net_value.sum() pct_telecom pct_publicity = r[r['subquota_description'] == 'Publicity of parliamentary activity'].total_net_value.sum() / r.total_net_value.sum() pct_publicity ``` #### Congressmen that hired the top supplier and how much they paid ``` r.groupby(['cnpj_cpf', 'congressperson_name'])\ .sum()\ .sort_values(by='total_net_value', ascending=False)\ .loc['02558157000162']\ .total_net_value\ .head(20) ``` ### Which are the most expensive individual reimbursements? ``` r = r.sort_values(by='total_net_value', ascending=False) r.head(20) ```
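A possible follow-up, given that `r` is left sorted by `total_net_value` in the cell above, is to check which subquota types dominate these extreme reimbursements. This is only a sketch; the cutoff of 100 rows is arbitrary.

```
# Which spending types produce the largest individual reimbursements?
# `r` is already sorted by total_net_value (descending) in the previous cell,
# so the first 100 rows are the 100 most expensive reimbursements.
r.head(100)\
 .groupby('subquota_description')\
 .total_net_value\
 .agg(['count', 'sum'])\
 .sort_values(by='sum', ascending=False)
```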
github_jupyter
**Copyright 2021 The TensorFlow Hub Authors.** Licensed under the Apache License, Version 2.0 (the "License"); ``` # Copyright 2021 The TensorFlow Hub Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== ``` <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://tfhub.dev/google/universal-sentence-encoder-cmlm/en-base/1"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/hub/blob/master/examples/colab/senteval_for_universal_sentence_encoder_cmlm.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/hub/blob/master/examples/colab/senteval_for_universal_sentence_encoder_cmlm.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/hub/examples/colab/senteval_for_universal_sentence_encoder_cmlm.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> <td> <a href="https://tfhub.dev/google/universal-sentence-encoder-cmlm/en-base/1"><img src="https://www.tensorflow.org/images/hub_logo_32px.png" />See TF Hub model</a> </td> </table> #Universal Sentence Encoder SentEval demo This Colab demonstrates the [Universal Sentence Encoder CMLM model](https://tfhub.dev/google/universal-sentence-encoder-cmlm/en-base/1) using the [SentEval](https://github.com/facebookresearch/SentEval) toolkit, which is a library for measuring the quality of sentence embeddings. The SentEval toolkit includes a diverse set of downstream tasks that are able to evaluate the generalization power of an embedding model and to evaluate the linguistic properties encoded. Run the first two code blocks to set up the environment; in the third code block you can pick a SentEval task to evaluate the model. A GPU runtime is recommended to run this Colab. To learn more about the Universal Sentence Encoder CMLM model, see https://openreview.net/forum?id=WDVD4lUCTzU. ``` #@title Install dependencies !pip install --quiet "tensorflow-text==2.8.*" !pip install --quiet torch==1.8.1 ``` ## Download SentEval and task data This step downloads SentEval from GitHub and executes the data script to download the task data. It may take up to 5 minutes to complete. ``` #@title Install SentEval and download task data !rm -rf ./SentEval !git clone https://github.com/facebookresearch/SentEval.git !cd $PWD/SentEval/data/downstream && bash get_transfer_data.bash > /dev/null 2>&1 ``` #Execute a SentEval evaluation task The following code block executes a SentEval task and outputs the results. Choose one of the following tasks to evaluate the USE CMLM model: ``` MR CR SUBJ MPQA SST TREC MRPC SICK-E ``` Select a model, params and task to run. 
The rapid prototyping params can be used for reducing computation time for faster result. It typically takes 5-15 mins to complete a task with the **'rapid prototyping'** params and up to an hour with the **'slower, best performance'** params. ``` params = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5} params['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128, 'tenacity': 3, 'epoch_size': 2} ``` For better result, use the slower **'slower, best performance'** params, computation may take up to 1 hour: ``` params = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 10} params['classifier'] = {'nhid': 0, 'optim': 'adam', 'batch_size': 16, 'tenacity': 5, 'epoch_size': 6} ``` ``` import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' import sys sys.path.append(f'{os.getcwd()}/SentEval') import tensorflow as tf # Prevent TF from claiming all GPU memory so there is some left for pytorch. gpus = tf.config.list_physical_devices('GPU') if gpus: # Memory growth needs to be the same across GPUs. for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) import tensorflow_hub as hub import tensorflow_text import senteval import time PATH_TO_DATA = f'{os.getcwd()}/SentEval/data' MODEL = 'https://tfhub.dev/google/universal-sentence-encoder-cmlm/en-base/1' #@param ['https://tfhub.dev/google/universal-sentence-encoder-cmlm/en-base/1', 'https://tfhub.dev/google/universal-sentence-encoder-cmlm/en-large/1'] PARAMS = 'rapid prototyping' #@param ['slower, best performance', 'rapid prototyping'] TASK = 'CR' #@param ['CR','MR', 'MPQA', 'MRPC', 'SICKEntailment', 'SNLI', 'SST2', 'SUBJ', 'TREC'] params_prototyping = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5} params_prototyping['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128, 'tenacity': 3, 'epoch_size': 2} params_best = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 10} params_best['classifier'] = {'nhid': 0, 'optim': 'adam', 'batch_size': 16, 'tenacity': 5, 'epoch_size': 6} params = params_best if PARAMS == 'slower, best performance' else params_prototyping preprocessor = hub.KerasLayer( "https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3") encoder = hub.KerasLayer( "https://tfhub.dev/google/universal-sentence-encoder-cmlm/en-base/1") inputs = tf.keras.Input(shape=tf.shape(''), dtype=tf.string) outputs = encoder(preprocessor(inputs)) model = tf.keras.Model(inputs=inputs, outputs=outputs) def prepare(params, samples): return def batcher(_, batch): batch = [' '.join(sent) if sent else '.' for sent in batch] return model.predict(tf.constant(batch))["default"] se = senteval.engine.SE(params, batcher, prepare) print("Evaluating task %s with %s parameters" % (TASK, PARAMS)) start = time.time() results = se.eval(TASK) end = time.time() print('Time took on task %s : %.1f. seconds' % (TASK, end - start)) print(results) ``` #Learn More * Find more text embedding models on [TensorFlow Hub](https://tfhub.dev) * See also the [Multilingual Universal Sentence Encoder CMLM model](https://tfhub.dev/google/universal-sentence-encoder-cmlm/multilingual-base-br/1) * Check out other [Universal Sentence Encoder models](https://tfhub.dev/google/collections/universal-sentence-encoder/1) ## Reference * Ziyi Yang, Yinfei Yang, Daniel Cer, Jax Law, Eric Darve. [Universal Sentence Representations Learning with Conditional Masked Language Model. November 2020](https://openreview.net/forum?id=WDVD4lUCTzU)
github_jupyter
``` import numpy as np import matplotlib.pyplot as plt from PIL import Image def _if_near(point, mask, nearest_neighbor): nn = nearest_neighbor w,h = mask.shape x,y = point mask = np.pad(mask,nn,'edge') x += nn y += nn if(w+nn>x and h+nn>y): x_i,y_i = int(x+0.5),int(y+0.5) #return True near = mask[x_i-nn:x_i+nn,y_i-nn:y_i+nn] if near.max()-near.min() != 0: if(x<w and y<h): return True return False # *** # *n* It's an example of 1-neighbor # *** # # ***** # ***** # **n** It's an example of 2-neighbor # ***** # ***** # # Did you get any of that? def _get_edge_k_neighbor(img,k): ''' I will say the idea is identical to the the original _is_near, but this implement save the temporal result and thus speed up the whole process by a massive margin when a big amount of points requires calculation. This will return a array sized (w,h), store the max-min value in its neighbor. ''' w,h = img.shape padded = np.pad(img, k, 'edge') # this is the result image array res = np.zeros(img.shape) # This is the main process for i in range(w): for j in range(h): neighbor = padded[i:i+2*k,j:j+2*k] _max = neighbor.max() _min = neighbor.min() res[i-k,j-k] = (_max-_min) return res def _new_if_near(point, edge_k_neighbor): x, y = point x, y = int(x), int(y) return edge_k_neighbor[x][y]>0 def getpoint(mask_img, k, beta, training = True, nearest_neighbor=3, new_if_near = True): w,h = mask_img.shape N = int(beta*k*w*h) xy_min = [0, 0] xy_max = [w-1, h-1] points = np.random.uniform(low=xy_min, high=xy_max, size=(N,2)) #print(points) if(beta>1 or beta<0): print("beta should be in range [0,1]") return NULL # for the training, the mask is a hard mask if training == True: if beta ==0: return points res = [] if new_if_near: edge_k_neighbor = _get_edge_k_neighbor(mask_img,nearest_neighbor) for p in points: if _new_if_near(p,edge_k_neighbor): res.append(p) else: for p in points: if _if_near(p,mask_img,nearest_neighbor): res.append(p) others = int((1-beta)*k*w*h) not_edge_points = np.random.uniform(low=xy_min, high=xy_max, size=(others,2)) for p in not_edge_points: res.append(p) return res # for the inference, the mask is a soft mask if training == False: res = [] for i in range(w): for j in range(h): if mask_img[i,j] > 0: res.append((i,j)) return res def _generate_mask(size, func = lambda x:x*x): w,h = size res = np.zeros((w,h)) for x in range(w): for y in range(h): if y> func(x): res[x,y] = 255 return res my_mask = _generate_mask((14,14), ) plt.imshow(my_mask) %%timeit #plt.imshow(my_mask,cmap="Purples") points = getpoint(mask_img=my_mask,k=1000,beta=0.8,nearest_neighbor=2,new_if_near=True) # points = list(zip(*points)) # plt.scatter(points[1],points[0],c='black',s=4) plt.imshow(my_mask,cmap="Purples") points = getpoint(my_mask,1,1,nearest_neighbor=2) points = list(zip(*points)) plt.scatter(points[1],points[0],c='black',s=4) plt.imshow(my_mask,cmap="Purples") points = getpoint(my_mask,10,1,nearest_neighbor=2) points = list(zip(*points)) plt.scatter(points[1],points[0],c='black',s=4) my_mask = np.asarray(Image.open("tree_mask.jpg").resize((32,32))) my_mask = my_mask[:,:,0] my_mask.shape plt.imshow(my_mask,cmap="Purples") points = getpoint(my_mask,1,1,nearest_neighbor=1) points = list(zip(*points)) plt.scatter(points[1],points[0],c='black',s=4) from pointGenerate import getpoint import matplotlib.pyplot as plt from PIL import Image import numpy as np resolution = 128 sz = (resolution,resolution) my_mask = np.asarray(Image.open("tree_mask.jpg").resize(sz)) my_img = np.asarray(Image.open("tree.jpg").resize(sz)) my_mask = 
my_mask[:,:,0] points = getpoint(my_mask,0.25,0.95,nearest_neighbor=1) points = list(zip(*points)) plt.subplot(121) plt.imshow(my_mask,cmap="Purples") plt.scatter(points[1],points[0],c='black',s=4) plt.title('k=0.25, beta=0.95') plt.subplot(122) plt.imshow(my_img,cmap="Purples") plt.scatter(points[1],points[0],c='black',s=4) plt.title('k=0.25, beta=0.95') plt.savefig('resolution=128.jpg',dpi=400) points = getpoint(my_mask,1,-0.95,nearest_neighbor=1) import matplotlib.pyplot as plt n = [196,1960,19600,196000] t1 = [6.11,61.8,609,6280] v1 = [0.122,2.92,14.3,99.9] t2 = [1.3,3.93,28.2,267] v2 = [0.0084,0.383,0.643,12.7] fig, ax2 = plt.subplots(1, 1) ax2.set_xscale("log") ax2.set_yscale("log") ax2.set_adjustable("datalim") ax2.plot(n, t1, "o-", label = 'original algorithm') ax2.plot(n, t2, "go-", label = 'improved algorithm') #ax2.set_xlim(1e-1, 1e2) #ax2.set_ylim(1e-1, 1e3) plt.ylabel('Time (ms)') plt.xlabel('Number of points') ax2.set_aspect(1) ax2.set_title("Performance improvement") plt.legend() plt.savefig('performance improvement.png',dpi=300) ```
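One way to summarize the gap shown in the chart above is to fit a straight line to the timings in log-log space: the slope approximates the empirical scaling exponent of each implementation. The sketch below reuses the `n`, `t1` and `t2` lists defined above; with only four measurement points it is a rough estimate, not a proper benchmark.

```
import numpy as np

# Rough scaling estimate from the four timing points above: the slope of a
# least-squares fit in log-log space approximates the exponent p in t ~ n^p.
slope_orig, _ = np.polyfit(np.log10(n), np.log10(t1), 1)
slope_impr, _ = np.polyfit(np.log10(n), np.log10(t2), 1)
print('original algorithm: t ~ n^%.2f' % slope_orig)
print('improved algorithm: t ~ n^%.2f' % slope_impr)
```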
github_jupyter
### Problem 1 __We will use a full day worth of tweets as an input (there are total of 4.4M tweets in this file, but you only need to read 1M):__ http://rasinsrv07.cstcis.cti.depaul.edu/CSC455/OneDayOfTweets.txt __a. Create a 3rd table incorporating the Geo table (in addition to tweet and user tables that you already have from HW4 and HW5) and extend your schema accordingly. You do not need to use ALTER TABLE, it is sufficient to just re-make your schema.__ __You will need to generate an ID for the Geo table primary key (you may use any value or reasonable combination of values as long as it is unique) for that table and link it to the Tweet table (foreign key should be in the Tweet). In addition to the primary key column, the geo table should have at least the “type”, “longitude” and “latitude” columns.__ ``` #imports import urllib.request, time, json, sqlite3 #setup conn = sqlite3.connect('Tweets_Database_THF1.db') #db connection c = conn.cursor() wFD = urllib.request.urlopen('http://rasinsrv07.cstcis.cti.depaul.edu/CSC455/OneDayOfTweets.txt') #get the file #c.execute('DROP TABLE IF EXISTS User'); #c.execute('DROP TABLE IF EXISTS Tweets'); #c.execute('DROP TABLE IF EXISTS Geo'); wFD.close() fdErr.close() c.close() conn.commit() conn.close() #create User Table create_UserTable = '''CREATE TABLE User ( ID INTEGER, NAME TEXT, SCREEN_NAME TEXT, DESCRIPTION TEXT, FRIENDS_COUNT INTEGER, CONSTRAINT User_pk PRIMARY KEY(ID) );''' c.execute('DROP TABLE IF EXISTS User') c.execute(create_UserTable) #create Tweets Table create_TweetsTable = '''CREATE TABLE Tweets ( ID INTEGER, Created_At DATE, Text TEXT, Source TEXT, In_Reply_to_User_ID INTEGER, In_Reply_to_Screen_Name TEXT, In_Reply_to_Status_ID INTEGER, Retweet_Count INTEGER, Contributors TEXT, User_ID INTEGER, Geo_ID Text, CONSTRAINT Tweet_pk PRIMARY KEY(ID), CONSTRAINT tweet_fk1 FOREIGN KEY (User_ID) REFERENCES User(ID), CONSTRAINT tweet_fk2 FOREIGN KEY (Geo_ID) REFERENCES Geo(ID) );''' c.execute('DROP TABLE IF EXISTS Tweets') c.execute(create_TweetsTable) #create Geo Table create_GeoTable = '''CREATE TABLE Geo ( ID Text, Type Text, Latitude INTEGER, Longitude INTEGER, CONSTRAINT Geo_pk PRIMARY KEY(ID) );''' c.execute('DROP TABLE IF EXISTS Geo') c.execute(create_GeoTable) ``` __b. Use python to download from the web and save to a local text file (not into database yet, just to text file) at least 1,000,000 lines worth of tweets. Test your code with fewer rows first and only time it when you know it works. Report how long did it take.__ __NOTE: Do not call read() or readlines() without any parameters at any point. That command will attempt to read the entire file which is too much data.__ ``` #open files start = time.time() db_file = open('THF_db.txt', 'w') # db_err_file = open('THF_db_errors.txt', 'w') for i in range(1000000): #for lines 1 through 1,000,0000 line = wFD.readline() try: db_file.write(line.decode()) #write to the database txt file except ValueError: db_err_file.write(line.decode() + '\n') #catch errors if they come up #close files db_file.close() db_err_file.close() end = time.time() print("Part b file writing took ", (end-start), ' seconds.') ``` __c. Repeat what you did in part-b, but instead of saving tweets to the file, populate the 3-table schema that you created in SQLite. 
Be sure to execute commit and verify that the data has been successfully loaded (report loaded row counts for each of the 3 tables).__ ``` start = time.time() fdErr = open('THF_error.txt', 'w', errors = 'replace') tweetBatch = [] userBatch = [] geoBatch = [] loadCounter = 0 # There is a total of 1,000,000 tweets, but we will do a for-loop here for i in range(1000000): line = wFD.readline() try: tweetDict = json.loads(line) # This is the dictionary for tweet info loadCounter = loadCounter + 1 #------------------------------------ #Tweet Table newRowTweet = [] # hold individual values of to-be-inserted row tweetKeys = ['id_str','created_at','text','source','in_reply_to_user_id', 'in_reply_to_screen_name', 'in_reply_to_status_id', 'retweet_count', 'contributors'] for key in tweetKeys: # For each dictionary key we want if tweetDict[key] == 'null' or tweetDict[key] == '': newRowTweet.append(None) #null else: newRowTweet.append(tweetDict[key]) # use value as-is #Adds in user_id userDict = tweetDict['user'] # This the the dictionary for user information newRowTweet.append(userDict['id']) # User id/ foreign key #Adds in geo_id geoDict = tweetDict['geo'] if tweetDict['geo']: newRowTweet.append(str(tweetDict['geo']['coordinates'])) #geo_id is the latitude/longitude as a string else: newRowTweet.append(None) # Geo info is missing #batching if loadCounter < 50: # Batching 1 at a time tweetBatch.append(newRowTweet) else: c.executemany ('INSERT OR IGNORE INTO Tweets VALUES(?,?,?,?,?,?,?,?,?,?,?)', tweetBatch) tweetBatch = [] # Reset the list of batched tweets #------------------------------------ #User Table newRowUser = [] # hold individual values of to-be-inserted row for user table userKeys = ['id', 'name', 'screen_name', 'description', 'friends_count'] for key in userKeys: # For each dictionary key we want if userDict[key] == 'null' or userDict[key] == '': newRowUser.append(None) # proper NULL else: newRowUser.append(userDict[key]) # use value as-is #batching if loadCounter < 50: # Batching 1 at a time userBatch.append(newRowUser) else: c.executemany ('INSERT OR IGNORE INTO User VALUES(?,?,?,?,?)', userBatch) loadCounter = 0 userBatch = [] # Reset the list of batched users #------------------------------------ #Geo Table newRowGeo = [] # hold individual values of to-be-inserted row for geo table geoKeys = ['id','type','latitude', 'longitude'] if tweetDict['geo'] == 'null' or tweetDict['geo'] == '' or tweetDict['geo'] is None: #do nothing continue else: #id newRowGeo.append(str(tweetDict['geo']['coordinates'])) #type newRowGeo.append(tweetDict['geo']['type']) #latitude newRowGeo.append(tweetDict['geo']['coordinates'][0]) #longitude newRowGeo.append(tweetDict['geo']['coordinates'][1]) if loadCounter < 50: # Batching 1 at a time geoBatch.append(newRowGeo) else: c.executemany ('INSERT OR IGNORE INTO User VALUES(?,?,?,?)', geoBatch) loadCounter = 0 geoBatch = [] # Reset the list of batched geos except ValueError: # Handle the error of JSON parsing fdErr.write(line.decode() + '\n') # Final batch (the remaining less-than-50 rows to be loaded) c.executemany ('INSERT OR IGNORE INTO Tweets VALUES(?,?,?,?,?,?,?,?,?,?,?)', tweetBatch) c.executemany ('INSERT OR IGNORE INTO User VALUES(?,?,?,?,?)', userBatch) c.executemany ('INSERT OR IGNORE INTO Geo VALUES(?,?,?,?)', geoBatch) print ("Loaded ", c.execute('SELECT COUNT(*) FROM Tweets').fetchall()[0], " Tweet rows") print ("Loaded ", c.execute('SELECT COUNT(*) FROM User').fetchall()[0], " User rows") print ("Loaded ", c.execute('SELECT COUNT(*) FROM 
Geo').fetchall()[0], " Geo rows") wFD.close() fdErr.close() c.close() conn.commit() conn.close() end = time.time() print("Part c file writing took ", (end-start), ' seconds.') c.execute('SELECT * FROM Geo LIMIT 2').fetchall() c.execute('SELECT * FROM Tweets LIMIT 2').fetchall() c.execute('SELECT * FROM User LIMIT 2').fetchall() ``` __How long did this step take?__ It took: __d. Use your locally saved tweet file (created in part-b) to repeat the database population step from part-c. That is, load 1,000,000 tweets into the 3-table database using your saved file with tweets (do not use the URL to read twitter data).__ ``` start = time.time() #open the database text file f = open("THF_db.txt", 'r', encoding='utf-8') fdErr = open('THF_error.txt', 'w', errors = 'replace') tweetBatch = [] userBatch = [] geoBatch = [] loadCounter = 0 # There is a total of 1,000,000 tweets, but we will do a for-loop here for i in range(1000000): line = f.readline() try: tweetDict = json.loads(line) # This is the dictionary for tweet info loadCounter = loadCounter + 1 #------------------------------------ #Tweet Table newRowTweet = [] # hold individual values of to-be-inserted row tweetKeys = ['id_str','created_at','text','source','in_reply_to_user_id', 'in_reply_to_screen_name', 'in_reply_to_status_id', 'retweet_count', 'contributors'] for key in tweetKeys: # For each dictionary key we want if tweetDict[key] == 'null' or tweetDict[key] == '': newRowTweet.append(None) #null else: newRowTweet.append(tweetDict[key]) # use value as-is #Adds in user_id userDict = tweetDict['user'] # This the the dictionary for user information newRowTweet.append(userDict['id']) # User id/ foreign key #Adds in geo_id geoDict = tweetDict['geo'] if tweetDict['geo']: newRowTweet.append(str(tweetDict['geo']['coordinates'])) #geo_id is the latitude/longitude as a string else: newRowTweet.append(None) # Geo info is missing #batching if loadCounter < 50: # Batching 1 at a time tweetBatch.append(newRowTweet) else: c.executemany ('INSERT OR IGNORE INTO Tweets VALUES(?,?,?,?,?,?,?,?,?,?,?)', tweetBatch) tweetBatch = [] # Reset the list of batched tweets #------------------------------------ #User Table newRowUser = [] # hold individual values of to-be-inserted row for user table userKeys = ['id', 'name', 'screen_name', 'description', 'friends_count'] for key in userKeys: # For each dictionary key we want if userDict[key] == 'null' or userDict[key] == '': newRowUser.append(None) # proper NULL else: newRowUser.append(userDict[key]) # use value as-is #batching if loadCounter < 50: # Batching 1 at a time userBatch.append(newRowUser) else: c.executemany ('INSERT OR IGNORE INTO User VALUES(?,?,?,?,?)', userBatch) loadCounter = 0 userBatch = [] # Reset the list of batched users #------------------------------------ #Geo Table newRowGeo = [] # hold individual values of to-be-inserted row for geo table geoKeys = ['id','type','latitude', 'longitude'] if tweetDict['geo'] == 'null' or tweetDict['geo'] == '' or tweetDict['geo'] is None: #do nothing continue else: #id newRowGeo.append(str(tweetDict['geo']['coordinates'])) #type newRowGeo.append(tweetDict['geo']['type']) #latitude newRowGeo.append(tweetDict['geo']['coordinates'][0]) #longitude newRowGeo.append(tweetDict['geo']['coordinates'][1]) if loadCounter < 50: # Batching 1 at a time geoBatch.append(newRowGeo) else: c.executemany ('INSERT OR IGNORE INTO User VALUES(?,?,?,?)', geoBatch) loadCounter = 0 geoBatch = [] # Reset the list of batched geos except ValueError: # Handle the error of JSON parsing 
fdErr.write(line.decode() + '\n') # Final batch (the remaining less-than-50 rows to be loaded) c.executemany ('INSERT OR IGNORE INTO Tweets VALUES(?,?,?,?,?,?,?,?,?,?,?)', tweetBatch) c.executemany ('INSERT OR IGNORE INTO User VALUES(?,?,?,?,?)', userBatch) c.executemany ('INSERT OR IGNORE INTO Geo VALUES(?,?,?,?)', geoBatch) print ("Loaded ", c.execute('SELECT COUNT(*) FROM Tweets').fetchall()[0], " Tweet rows") print ("Loaded ", c.execute('SELECT COUNT(*) FROM User').fetchall()[0], " User rows") print ("Loaded ", c.execute('SELECT COUNT(*) FROM Geo').fetchall()[0], " Geo rows") f.close() wFD.close() fdErr.close() c.close() conn.commit() conn.close() end = time.time() print("Part d file writing took ", (end-start), ' seconds.') ``` __How does the runtime compare with part-c?__ Compared to part-c it took: __e. Re-run the previous step with a batching size of 1000 (i.e. by inserting 1000 rows at a time with executemany).__ ``` start = time.time() #open the database text file f = open("THF_db.txt", 'r', encoding='utf-8') fdErr = open('THF_error.txt', 'w', errors = 'replace') tweetBatch = [] userBatch = [] geoBatch = [] loadCounter = 0 # There is a total of 1,000,000 tweets, but we will do a for-loop here for i in range(1000000): line = f.readline() try: tweetDict = json.loads(line) # This is the dictionary for tweet info loadCounter = loadCounter + 1 #------------------------------------ #Tweet Table newRowTweet = [] # hold individual values of to-be-inserted row tweetKeys = ['id_str','created_at','text','source','in_reply_to_user_id', 'in_reply_to_screen_name', 'in_reply_to_status_id', 'retweet_count', 'contributors'] for key in tweetKeys: # For each dictionary key we want if tweetDict[key] == 'null' or tweetDict[key] == '': newRowTweet.append(None) #null else: newRowTweet.append(tweetDict[key]) # use value as-is #Adds in user_id userDict = tweetDict['user'] # This the the dictionary for user information newRowTweet.append(userDict['id']) # User id/ foreign key #Adds in geo_id geoDict = tweetDict['geo'] if tweetDict['geo']: newRowTweet.append(str(tweetDict['geo']['coordinates'])) #geo_id is the latitude/longitude as a string else: newRowTweet.append(None) # Geo info is missing #batching if loadCounter < 1000: # Batching 1 at a time tweetBatch.append(newRowTweet) else: c.executemany ('INSERT OR IGNORE INTO Tweets VALUES(?,?,?,?,?,?,?,?,?,?,?)', tweetBatch) tweetBatch = [] # Reset the list of batched tweets #------------------------------------ #User Table newRowUser = [] # hold individual values of to-be-inserted row for user table userKeys = ['id', 'name', 'screen_name', 'description', 'friends_count'] for key in userKeys: # For each dictionary key we want if userDict[key] == 'null' or userDict[key] == '': newRowUser.append(None) # proper NULL else: newRowUser.append(userDict[key]) # use value as-is #batching if loadCounter < 1000: # Batching 1 at a time userBatch.append(newRowUser) else: c.executemany ('INSERT OR IGNORE INTO User VALUES(?,?,?,?,?)', userBatch) loadCounter = 0 userBatch = [] # Reset the list of batched users #------------------------------------ #Geo Table newRowGeo = [] # hold individual values of to-be-inserted row for geo table geoKeys = ['id','type','latitude', 'longitude'] if tweetDict['geo'] == 'null' or tweetDict['geo'] == '' or tweetDict['geo'] is None: #do nothing continue else: #id newRowGeo.append(str(tweetDict['geo']['coordinates'])) #type newRowGeo.append(tweetDict['geo']['type']) #latitude newRowGeo.append(tweetDict['geo']['coordinates'][0]) #longitude 
newRowGeo.append(tweetDict['geo']['coordinates'][1]) if loadCounter < 1000: # Batching 1 at a time geoBatch.append(newRowGeo) else: c.executemany ('INSERT OR IGNORE INTO User VALUES(?,?,?,?)', geoBatch) loadCounter = 0 geoBatch = [] # Reset the list of batched geos except ValueError: # Handle the error of JSON parsing fdErr.write(line.decode() + '\n') # Final batch (the remaining less-than-50 rows to be loaded) c.executemany ('INSERT OR IGNORE INTO Tweets VALUES(?,?,?,?,?,?,?,?,?,?,?)', tweetBatch) c.executemany ('INSERT OR IGNORE INTO User VALUES(?,?,?,?,?)', userBatch) c.executemany ('INSERT OR IGNORE INTO Geo VALUES(?,?,?,?)', geoBatch) print ("Loaded ", c.execute('SELECT COUNT(*) FROM Tweets').fetchall()[0], " Tweet rows") print ("Loaded ", c.execute('SELECT COUNT(*) FROM User').fetchall()[0], " User rows") print ("Loaded ", c.execute('SELECT COUNT(*) FROM Geo').fetchall()[0], " Geo rows") f.close() wFD.close() fdErr.close() c.close() conn.commit() conn.close() end = time.time() print("Part e file writing took ", (end-start), ' seconds.') ``` __How does the runtime compare when batching is used?__ The runtime with batching: ### Problem 2 __a. Write and execute SQL queries to do the following. Don’t forget to report the running times in each part and the code you used.__ __i. Find tweets where tweet id_str contains “55” or “88” anywhere in the column__ ``` start = time.time() c.execute('SELECT * FROM Tweets WHERE id LIKE "%55%" or id LIKE "%88%" ').fetchall() end = time.time() print("Part i query took ", (end-start), ' seconds.') c.execute('SELECT * FROM Tweets WHERE id LIKE "%55%" or id LIKE "%88%" ').fetchall() ``` __ii. Find how many unique values are there in the “in_reply_to_user_id” column__ ``` start = time.time() c.execute('SELECT COUNT(DISTINCT in_reply_to_user_id) AS num_replies FROM Tweets').fetchall() end = time.time() print("Part ii query took ", (end-start), ' seconds.') c.execute('SELECT COUNT(DISTINCT in_reply_to_user_id) AS num_replies FROM Tweets').fetchall() ``` __iii. Find the tweet(s) with the shortest, longest and average length text message.__ ``` start = time.time() c.execute('SELECT MIN(LENGTH(Text)) AS shortest, MAX(LENGTH(Text)) AS longest, AVG(LENGTH(Text)) \ AS average FROM Tweets').fetchall() end = time.time() print("Part iii query took ", (end-start), ' seconds.') start = time.time() c.execute('SELECT * FROM TWEETS WHERE LENGTH(Text) IN (1,434,68.83193998521863)').fetchall() end = time.time() print("Part iii query took ", (end-start), ' seconds.') ``` __iv. Find the average longitude and latitude value for each user name.__ ``` start = time.time() c.execute('SELECT screen_name, AVG(latitude), AVG(longitude) FROM User \ JOIN Tweets ON User.ID=Tweets.user_id \ JOIN Geo ON Tweets.geo_id=Geo.ID \ GROUP BY screen_name').fetchall() end = time.time() print("Part iv query took ", (end-start), ' seconds.') c.execute('SELECT screen_name, AVG(latitude), AVG(longitude) FROM User \ JOIN Tweets ON User.ID=Tweets.user_id \ JOIN Geo ON Tweets.geo_id=Geo.ID \ GROUP BY screen_name').fetchall() ``` __v. 
Find how many known/unknown locations there were in total (e.g., 50,000 known, 950,000 unknown, 5% locations are available)__ ``` c.execute('SELECT (COUNT(*)-COUNT(CASE WHEN `geo_id` IS NULL THEN 1 END)) \ ,COUNT(CASE WHEN `geo_id` IS NULL THEN 1 END), \ ROUND((COUNT(*)-COUNT(CASE WHEN `geo_id` IS NULL THEN 1 END)) * 100.0 / \ COUNT(CASE WHEN `geo_id` IS NULL THEN 1 END), 1)FROM Tweets').fetchall() start = time.time() c.execute('SELECT (COUNT(*)-COUNT(CASE WHEN `geo_id` IS NULL THEN 1 END)) \ ,COUNT(CASE WHEN `geo_id` IS NULL THEN 1 END), \ ROUND((COUNT(*)-COUNT(CASE WHEN `geo_id` IS NULL THEN 1 END)) * 100.0 / \ COUNT(CASE WHEN `geo_id` IS NULL THEN 1 END), 1)FROM Tweets').fetchall() end = time.time() print("Part v query took ", (end-start), ' seconds.') ``` __vi. Re-execute the query in part iv) 10 times and 100 times and measure the total runtime (just re-run the same exact query multiple times using a for-loop). Does the runtime scale linearly? (i.e., does it take 10X and 100X as much time?)__ ``` start = time.time() for i in range(10): c.execute('SELECT screen_name, AVG(latitude), AVG(longitude) FROM User \ JOIN Tweets ON User.ID=Tweets.user_id \ JOIN Geo ON Tweets.geo_id=Geo.ID \ GROUP BY screen_name').fetchall() end = time.time() print("Part iv 10x query took ", (end-start), ' seconds.') start = time.time() for i in range(100): c.execute('SELECT screen_name, AVG(latitude), AVG(longitude) FROM User \ JOIN Tweets ON User.ID=Tweets.user_id \ JOIN Geo ON Tweets.geo_id=Geo.ID \ GROUP BY screen_name').fetchall() end = time.time() print("Part iv 100x query took ", (end-start), ' seconds.') ``` __b. Write python code that is going to read the locally saved tweet data file from 1-b and perform the equivalent computation for parts 2-i and 2-ii only. How does the runtime compare to the SQL queries?__ ``` #i - c.execute('SELECT * FROM Tweets WHERE id LIKE "%55%" or id LIKE "%88%" ').fetchall() import pandas as pd start = time.time() f = open("THF_db.txt", 'r', encoding='utf-8') data = [] labels = ['id_str','in_reply_to_user_id'] error_tally =0 # Loop through the 1,000,000 tweets in the text file for i in range(1000000): line = f.readline() try: tweetDict = json.loads(line) # This is the dictionary for tweet info data.append((tweetDict["id_str"], tweetDict["in_reply_to_user_id"])) except: #catch any error error_tally+=1 df=pd.DataFrame.from_records(data,columns=labels) f.close() df_end = df[df['id_str'].astype(str).str.contains('55|88')] end = time.time() print("Part 2b-i loop took ", (end-start), ' seconds.') df_end.head(10) #ii - c.execute('SELECT COUNT(DISTINCT in_reply_to_user_id) AS num_replies FROM Tweets').fetchall() import pandas as pd start = time.time() f = open("THF_db.txt", 'r', encoding='utf-8') data = [] labels = ['id_str','in_reply_to_user_id'] error_tally =0 # Loop through the 1,000,000 tweets in the text file for i in range(1000000): line = f.readline() try: tweetDict = json.loads(line) # This is the dictionary for tweet info data.append((tweetDict["id_str"], tweetDict["in_reply_to_user_id"])) except: #catch any error error_tally+=1 df=pd.DataFrame.from_records(data,columns=labels) f.close() df_end = df['in_reply_to_user_id'].value_counts(ascending=False) end = time.time() print("Part 2b-ii loop took ", (end-start), ' seconds.') df_end.head(10) ``` ### Problem 3 __a. Export the contents of the User table from a SQLite table into a sequence of INSERT statements within a file. This is very similar to what you already did in Assignment 4. 
However, you have to add a unique ID column which has to be a string (you cannot use numbers). Hint: you can replace digits with letters, e.g., chr(ord('a')+1) gives you a 'b' and chr(ord('a')+2) returns a 'c'__ ``` #import sqlite3 def generateInsertStatements(tblName): conn = sqlite3.connect('Tweets_Database_THF1.db') # Using HW3 SQLite DB (preloaded) c = conn.cursor() # Open file for export fd = open(tblName+'.txt', 'w') tblRows = c.execute('SELECT * FROM %s' % tblName) for row in tblRows: fd.write("INSERT INTO %s VALUES %s;\n" % (tblName, str(row))) fd.close() c.close() conn.close() start = time.time() generateInsertStatements('User') end = time.time() print("Part 3a loop took ", (end-start), ' seconds.') ``` Part 2b-ii loop took 314.5617868900299 seconds. __b. Create the same collection of INSERT for the User table by reading data from the local tweet file that you have saved earlier.__ ``` def generateInsertStatements_b(tblName): #open the database text file f = open("THF_db.txt", 'r', encoding='utf-8') #open the file to write to fd = open(tblName+'.txt', 'w') err=0 for i in range(1000000): line = f.readline() try: tweetDict = json.loads(line) # This is the dictionary for tweet info userDict = tweetDict['user'] #User Table newRowUser = [] # hold individual values of to-be-inserted row for user table userKeys = ['id', 'name', 'screen_name', 'description', 'friends_count'] for key in userKeys: # For each dictionary key we want if userDict[key] == 'null' or userDict[key] == '': newRowUser.append(None) # proper NULL else: newRowUser.append(userDict[key]) # use value as-is fd.write("INSERT INTO %s VALUES %s;\n" % (tblName, str((newRowUser)))) except: err+=1 f.close() fd.close() start = time.time() generateInsertStatements_b('User') end = time.time() print("Part 3b loop took ", (end-start), ' seconds.') ``` __How do these compare in runtime? Which method was faster?__ Comparing the runtime: ### Problem 4 __4. Export all three tables (Tweet, User and Geo tables) from the database into a |-separated text file (each value in a row should be separated by |). You do not generate SQL INSERT statements, just raw |-separated text data.__ ``` #import sqlite3 #import pandas as pd conn = sqlite3.connect('Tweets_Database_THF1.db') c = conn.cursor() df_tweets_read = pd.read_sql_query("SELECT * FROM Tweets;", conn) #tweets df_user_read = pd.read_sql_query("SELECT * FROM User;", conn) #user df_geo_read = pd.read_sql_query("SELECT * FROM Geo;", conn) #geo df_tweets_write = df_tweets_read.to_csv("tweets_table.txt", sep ='|') #tweets df_user_write = df_user_read.to_csv("user_table.txt", sep ='|') #user df_geo_write = df_geo_read.to_csv("geo_table.txt", sep ='|') #geo c.close() conn.commit() conn.close() ``` __a. For the Geo table, add a new column with relative distance from a fixed point which is the location of CDM (41.878668, -87.625555). 
You can simply treat it as a point-to-point Euclidean distance (although bonus points for finding a real distance in miles) and round the longitude and latitude columns to a maximum of 4 digits after the decimal.__ ``` import sqlite3 import pandas as pd import numpy as np conn = sqlite3.connect('Tweets_Database_THF1.db') c = conn.cursor() df_geo_read = pd.read_sql_query("SELECT * FROM Geo;", conn) #geo df_geo_read['Latitude'] = df_geo_read.Latitude.round(4) df_geo_read['Longitude'] = df_geo_read.Longitude.round(4) df_geo_read['distance'] = (df_geo_read.Latitude.sub(41.878668).pow(2).add(df_geo_read.Longitude.sub(-87.625555).pow(2))).pow(.5).round(4) df_geo_write = df_geo_read.to_csv("geo_table.txt", sep ='|') #geo c.close() conn.commit() conn.close() df_geo_read.head(10) ``` __b. For the Tweet table, add two new columns from the User table (“name” and “screen_name”) in addition to existing columns.__ ``` import sqlite3 import pandas as pd conn = sqlite3.connect('Tweets_Database_THF1.db') c = conn.cursor() df_tweets_read = pd.read_sql_query("SELECT * FROM Tweets;", conn) #Tweets df_user_read = pd.read_sql_query("SELECT * FROM User;", conn) #User new_df = pd.merge(df_tweets_read, df_user_read, how='left', left_on='User_ID',right_on='ID') new_df = new_df.drop(['DESCRIPTION','FRIENDS_COUNT','ID_y'], axis=1) df_tweets_write = new_df.to_csv("tweet_table.txt", sep ='|') #Tweets written c.close() conn.commit() conn.close() new_df.head(10) ``` __c. For the User table file add a column that specifies how many tweets by that user are currently in the database. That is, your output file should contain all of the columns from the User table, plus the new column with tweet count. You do not need to modify the User table, just create the output text file. What is the name of the user with most tweets?__ ``` #x = new_df['User_ID'].value_counts() #x.columns = ['User_ID','tweets_count'] #x.head() z = new_df['User_ID'] import sqlite3 import pandas as pd conn = sqlite3.connect('Tweets_Database_THF1.db') c = conn.cursor() #df_user_read = pd.read_sql_query("SELECT * FROM User;", conn) #User #df_tweets_read = pd.read_sql_query("SELECT * FROM Tweets;", conn) #Tweets new_df['tweets_count'] = new_df.groupby('User_ID')['User_ID'].transform('count') #from last part join_df = new_df[['User_ID','tweets_count']] #dataframe with only id and tweetcount newer_df = pd.merge(df_user_read, join_df, left_on='ID', right_on='User_ID') clean_df = newer_df.sort_values(by=['tweets_count'], ascending=False).drop_duplicates() df_user_write = clean_df.to_csv("user_table.txt", sep ='|') #user c.close() conn.commit() conn.close() clean_df.head(10) newer_df.head(10) ```
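As a cross-check of the pandas result above, the same question (which user has the most tweets in the database) can also be answered directly in SQL. This is a sketch that assumes the `Tweets` and `User` tables from Problem 1 are still populated in `Tweets_Database_THF1.db`; it is not part of the required solution.

```
import sqlite3

conn = sqlite3.connect('Tweets_Database_THF1.db')
c = conn.cursor()

# Count tweets per user and join to User to get the name of the top tweeter.
top_user = c.execute('''SELECT User.NAME, User.SCREEN_NAME, COUNT(*) AS tweet_count
                        FROM Tweets JOIN User ON Tweets.User_ID = User.ID
                        GROUP BY Tweets.User_ID
                        ORDER BY tweet_count DESC
                        LIMIT 1''').fetchone()
print(top_user)

c.close()
conn.close()
```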
github_jupyter
# Tutorial of Node Schematas - PI & TwoSymbol Visualization of schematas for simple boolean nodes (automatas) ``` %load_ext autoreload %autoreload 2 %matplotlib inline from __future__ import division import numpy as np import pandas as pd from IPython.display import Image, display import cana from cana.datasets.bools import * from cana.drawing.canalizing_map import draw_canalizing_map_graphviz n = OR() print( n) print( 'k_r: %.2f - %.2f' % (n.input_redundancy(mode='node',bound='upper',norm=False), n.input_redundancy(mode='node',bound='lower',norm=False))) print( 'k_e: %.2f - %.2f' % (n.effective_connectivity(mode='node',bound='upper',norm=False), n.effective_connectivity(mode='node',bound='lower',norm=False))) print( 'k_s: %.2f - %.2f' % (n.input_symmetry(mode='node',bound='upper',norm=False), n.input_symmetry(mode='node',bound='lower',norm=False))) print() print( 'k_r: %s (upper)' % n.input_redundancy(mode='input',bound='upper')) print( 'k_e: %s (upper)' % n.input_redundancy(mode='input',bound='upper')) print() dfLUT, dfPI, dfTS = n.look_up_table(), n.schemata_look_up_table(type='pi'), n.schemata_look_up_table(type='ts') display(pd.concat({'Original LUT':dfLUT,'PI Schema':dfPI,'TS Schema':dfTS}, axis=1).fillna('-')) draw_canalizing_map_graphviz(n.canalizing_map()) n = CONTRADICTION() n.name = 'Con' print( n) print( 'k_r: %.2f - %.2f' % (n.input_redundancy(mode='node',bound='upper',norm=False), n.input_redundancy(mode='node',bound='lower',norm=False))) print( 'k_e: %.2f - %.2f' % (n.effective_connectivity(mode='node',bound='upper',norm=False), n.effective_connectivity(mode='node',bound='lower',norm=False))) print( 'k_s: %.2f - %.2f' % (n.input_symmetry(mode='node',bound='upper',norm=False), n.input_symmetry(mode='node',bound='lower',norm=False))) print() print( 'k_r: %s (upper)' % n.input_redundancy(mode='input',bound='upper')) print( 'k_e: %s (upper)' % n.input_redundancy(mode='input',bound='upper')) print dfLUT, dfPI, dfTS = n.look_up_table(), n.schemata_look_up_table(type='pi'), n.schemata_look_up_table(type='ts') display(pd.concat({'Original LUT':dfLUT,'PI Schema':dfPI,'TS Schema':dfTS}, axis=1).fillna('-')) draw_canalizing_map_graphviz(n.canalizing_map()) n = XOR() print( n) print( 'k_r: %.2f - %.2f' % (n.input_redundancy(mode='node',bound='upper',norm=False), n.input_redundancy(mode='node',bound='lower',norm=False))) print( 'k_e: %.2f - %.2f' % (n.effective_connectivity(mode='node',bound='upper',norm=False), n.effective_connectivity(mode='node',bound='lower',norm=False))) print( 'k_s: %.2f - %.2f' % (n.input_symmetry(mode='node',bound='upper',norm=False), n.input_symmetry(mode='node',bound='lower',norm=False))) print() print( 'k_r: %s (upper)' % n.input_redundancy(mode='input',bound='upper')) print( 'k_e: %s (upper)' % n.input_redundancy(mode='input',bound='upper')) print for input in [0,1]: for ts,per,sms in n._two_symbols[input]: print( 'TS: %s | PermIdx: %s | SameIdx: %s' % (ts, per,sms)) dfLUT, dfPI, dfTS = n.look_up_table(), n.schemata_look_up_table(type='pi'), n.schemata_look_up_table(type='ts') display(pd.concat({'Original LUT':dfLUT,'PI Schema':dfPI,'TS Schema':dfTS}, axis=1).fillna('-')) draw_canalizing_map_graphviz(n.canalizing_map()) n = AND() print( n) print( 'k_r: %.2f - %.2f' % (n.input_redundancy(mode='node',bound='upper',norm=False), n.input_redundancy(mode='node',bound='lower',norm=False))) print( 'k_e: %.2f - %.2f' % (n.effective_connectivity(mode='node',bound='upper',norm=False), n.effective_connectivity(mode='node',bound='lower',norm=False))) print( 'k_s: %.2f - 
%.2f' % (n.input_symmetry(mode='node',bound='upper',norm=False), n.input_symmetry(mode='node',bound='lower',norm=False))) print() print( 'k_r: %s (upper)' % n.input_redundancy(mode='input',bound='upper')) print( 'k_e: %s (upper)' % n.input_redundancy(mode='input',bound='upper')) print dfLUT, dfPI, dfTS = n.look_up_table(), n.schemata_look_up_table(type='pi'), n.schemata_look_up_table(type='ts') display(pd.concat({'Original LUT':dfLUT,'PI Schema':dfPI,'TS Schema':dfTS}, axis=1).fillna('-')) draw_canalizing_map_graphviz(n.canalizing_map()) n = COPYx1() n.name = 'CPx1' print( n) print( 'k_r: %.2f - %.2f' % (n.input_redundancy(mode='node',bound='upper',norm=False), n.input_redundancy(mode='node',bound='lower',norm=False))) print( 'k_e: %.2f - %.2f' % (n.effective_connectivity(mode='node',bound='upper',norm=False), n.effective_connectivity(mode='node',bound='lower',norm=False))) print('k_s: %.2f - %.2f' % (n.input_symmetry(mode='node',bound='upper',norm=False), n.input_symmetry(mode='node',bound='lower',norm=False))) print print( 'k_r: %s (upper)' % n.input_redundancy(mode='input',bound='upper')) print( 'k_e: %s (upper)' % n.input_redundancy(mode='input',bound='upper')) print() dfLUT, dfPI, dfTS = n.look_up_table(), n.schemata_look_up_table(type='pi'), n.schemata_look_up_table(type='ts') display(pd.concat({'Original LUT':dfLUT,'PI Schema':dfPI,'TS Schema':dfTS}, axis=1).fillna('-')) draw_canalizing_map_graphviz(n.canalizing_map()) n = RULE90() n.name = 'R90' print( n) print( 'k_r: %.2f - %.2f' % (n.input_redundancy(mode='node',bound='upper',norm=False), n.input_redundancy(mode='node',bound='lower',norm=False))) print( 'k_e: %.2f - %.2f' % (n.effective_connectivity(mode='node',bound='upper',norm=False), n.effective_connectivity(mode='node',bound='lower',norm=False))) print( 'k_s: %.2f - %.2f' % (n.input_symmetry(mode='node',bound='upper',norm=False), n.input_symmetry(mode='node',bound='lower',norm=False))) print() print( 'k_r: %s (upper)' % n.input_redundancy(mode='input',bound='upper')) print( 'k_e: %s (upper)' % n.input_redundancy(mode='input',bound='upper')) print() dfLUT, dfPI, dfTS = n.look_up_table(), n.schemata_look_up_table(type='pi'), n.schemata_look_up_table(type='ts') display(pd.concat({'Original LUT':dfLUT,'PI Schema':dfPI,'TS Schema':dfTS}, axis=1).fillna('-')) draw_canalizing_map_graphviz(n.canalizing_map()) n = RULE110() n.name = 'R110' print( n) print( 'k_r: %.2f - %.2f' % (n.input_redundancy(mode='node',bound='upper',norm=False), n.input_redundancy(mode='node',bound='lower',norm=False))) print( 'k_e: %.2f - %.2f' % (n.effective_connectivity(mode='node',bound='upper',norm=False), n.effective_connectivity(mode='node',bound='lower',norm=False))) print( 'k_s: %.2f - %.2f' % (n.input_symmetry(mode='node',bound='upper',norm=False), n.input_symmetry(mode='node',bound='lower',norm=False))) print() print( 'k_r: %s (upper)' % n.input_redundancy(mode='input',bound='upper')) print( 'k_e: %s (upper)' % n.input_redundancy(mode='input',bound='upper')) print() dfLUT, dfPI, dfTS = n.look_up_table(), n.schemata_look_up_table(type='pi'), n.schemata_look_up_table(type='ts') display(pd.concat({'Original LUT':dfLUT,'PI Schema':dfPI,'TS Schema':dfTS}, axis=1).fillna('-')) draw_canalizing_map_graphviz(n.canalizing_map()) ```
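To compare the nodes above side by side, the per-node measures can be collected into a single DataFrame. The sketch below reuses only constructors and method calls already shown in this tutorial, and assumes each preset node exposes the `name` attribute used earlier (nodes that were renamed above, such as CONTRADICTION, will show their default names here).

```
# Side-by-side summary of the upper-bound node measures computed above.
nodes = [OR(), CONTRADICTION(), XOR(), AND(), COPYx1(), RULE90(), RULE110()]
summary = pd.DataFrame([{
    'node': n.name,
    'k_r (upper)': n.input_redundancy(mode='node', bound='upper', norm=False),
    'k_e (upper)': n.effective_connectivity(mode='node', bound='upper', norm=False),
    'k_s (upper)': n.input_symmetry(mode='node', bound='upper', norm=False),
} for n in nodes]).set_index('node')
display(summary)
```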
github_jupyter