# Transformers for seq2seq tasks

Seq2seq is probably the most general formal problem setting in NLP: from an arbitrary input sequence we need to produce some other sequence. Unlike sequence labelling, the two sequences are not required to have the same length. Even a standard classification task can be cast as seq2seq by treating the class label as a sequence of length 1. Transformers are the state-of-the-art architecture for seq2seq tasks. We will not go through the transformer architecture in detail here; if you are interested, have a look at these materials:

- The original paper (somewhat hard) - https://arxiv.org/pdf/1706.03762.pdf
- https://jalammar.github.io/visualizing-neural-machine-translation-mechanics-of-seq2seq-models-with-attention/
- https://jalammar.github.io/illustrated-transformer/
- https://www.youtube.com/watch?v=iDulhoQ2pro
- https://www.youtube.com/watch?v=TQQlZhbC5ps
- The best-known tutorial (in PyTorch) - https://nlp.seas.harvard.edu/2018/04/03/attention.html

Transformers will be covered in detail in the (elective) deep learning course in the second year. For now we will simply try to train a model on a machine translation task. For this kind of task it is usually best to use pretrained models, but if you ever have a very specific seq2seq task it can make sense to train a transformer from scratch; in that case you would only need to change the data loading part of this notebook.

```
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data

from tokenizers import Tokenizer
from tokenizers.models import BPE
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.trainers import BpeTrainer

import os
import re
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedShuffleSplit, train_test_split
from string import punctuation
from collections import Counter
from IPython.display import Image
from IPython.core.display import HTML

%matplotlib inline

tokenizer_en = Tokenizer.from_file("./torch_weights/tokenizer_en")
tokenizer_ru = Tokenizer.from_file("./torch_weights/tokenizer_ru")
```

Text is turned into indices as shown below: a '[CLS]' token is added at the beginning and a '[SEP]' token at the end. If you recall the language modelling class, there we added "\<start>" and "\<end>"; cls and sep play essentially the same role. You will see why they are called cls and sep rather than start and end if you look deeper into how transformers are used.

```
def encode(text, tokenizer, max_len):
    return [tokenizer.token_to_id('[CLS]')] + tokenizer.encode(text).ids[:max_len] + [tokenizer.token_to_id('[SEP]')]


# make sure the padding index of the tokenizer matches the value used when padding the sequences
PAD_IDX = tokenizer_ru.token_to_id('[PAD]')
PAD_IDX

# limit the lengths to 30 and 35 (different on purpose, to show that seq2seq does not need equal lengths)
max_len_en, max_len_ru = 30, 35
```
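For illustration (this cell is not in the original notebook), here is what `encode` produces for a short English sentence; the actual ids depend on the BPE vocabularies loaded from `./torch_weights`, so the printed numbers will vary.

```
# Illustrative only: the ids depend on the trained BPE tokenizer, so treat the output as an example.
example_ids = encode("I love cats", tokenizer_en, max_len_en)
print(example_ids)  # [CLS] id, then the BPE ids of the sentence (at most max_len_en of them), then [SEP] id
print(tokenizer_en.token_to_id('[CLS]'), tokenizer_en.token_to_id('[SEP]'), PAD_IDX)
```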
# Transformer code

The model code below is taken (with small changes) from https://pytorch.org/tutorials/beginner/transformer_tutorial.html, which has comments for every step.

```
from torch import Tensor
import torch
import torch.nn as nn
from torch.nn import Transformer
import math

DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


# helper Module that adds positional encoding to the token embedding to introduce a notion of word order.
class PositionalEncoding(nn.Module):
    def __init__(self, emb_size: int, dropout: float, maxlen: int = 150):
        super(PositionalEncoding, self).__init__()
        den = torch.exp(- torch.arange(0, emb_size, 2) * math.log(10000) / emb_size)
        pos = torch.arange(0, maxlen).reshape(maxlen, 1)
        pos_embedding = torch.zeros((maxlen, emb_size))
        pos_embedding[:, 0::2] = torch.sin(pos * den)
        pos_embedding[:, 1::2] = torch.cos(pos * den)
        pos_embedding = pos_embedding.unsqueeze(-2)

        self.dropout = nn.Dropout(dropout)
        self.register_buffer('pos_embedding', pos_embedding)

    def forward(self, token_embedding: Tensor):
        return self.dropout(token_embedding + self.pos_embedding[:token_embedding.size(0), :])


# helper Module to convert tensor of input indices into corresponding tensor of token embeddings
class TokenEmbedding(nn.Module):
    def __init__(self, vocab_size: int, emb_size):
        super(TokenEmbedding, self).__init__()
        self.embedding = nn.Embedding(vocab_size, emb_size)
        self.emb_size = emb_size

    def forward(self, tokens: Tensor):
        return self.embedding(tokens.long()) * math.sqrt(self.emb_size)


# Seq2Seq Network
class Seq2SeqTransformer(nn.Module):
    def __init__(self,
                 num_encoder_layers: int,
                 num_decoder_layers: int,
                 emb_size: int,
                 nhead: int,
                 src_vocab_size: int,
                 tgt_vocab_size: int,
                 dim_feedforward: int = 512,
                 dropout: float = 0.1):
        super(Seq2SeqTransformer, self).__init__()
        self.transformer = Transformer(d_model=emb_size,
                                       nhead=nhead,
                                       num_encoder_layers=num_encoder_layers,
                                       num_decoder_layers=num_decoder_layers,
                                       dim_feedforward=dim_feedforward,
                                       dropout=dropout)
        self.generator = nn.Linear(emb_size, tgt_vocab_size)
        self.src_tok_emb = TokenEmbedding(src_vocab_size, emb_size)
        self.tgt_tok_emb = TokenEmbedding(tgt_vocab_size, emb_size)
        self.positional_encoding = PositionalEncoding(emb_size, dropout=dropout)

    def forward(self,
                src: Tensor,
                trg: Tensor,
                src_mask: Tensor,
                tgt_mask: Tensor,
                src_padding_mask: Tensor,
                tgt_padding_mask: Tensor,
                memory_key_padding_mask: Tensor):
        src_emb = self.positional_encoding(self.src_tok_emb(src))
        tgt_emb = self.positional_encoding(self.tgt_tok_emb(trg))
        outs = self.transformer(src_emb, tgt_emb, src_mask, tgt_mask, None,
                                src_padding_mask, tgt_padding_mask, memory_key_padding_mask)
        return self.generator(outs)

    def encode(self, src: Tensor, src_mask: Tensor):
        return self.transformer.encoder(self.positional_encoding(self.src_tok_emb(src)), src_mask)

    def decode(self, tgt: Tensor, memory: Tensor, tgt_mask: Tensor):
        return self.transformer.decoder(self.positional_encoding(self.tgt_tok_emb(tgt)), memory, tgt_mask)
```
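A quick shape check (not part of the original notebook): `nn.Transformer` is used here with the default `(sequence_length, batch_size)` layout, so the logits come out as `(target_length, batch_size, target_vocab_size)`. The tiny sizes below are arbitrary.

```
# Illustrative shape check with a small randomly initialized model (arbitrary sizes).
tiny_model = Seq2SeqTransformer(num_encoder_layers=1, num_decoder_layers=1,
                                emb_size=32, nhead=4,
                                src_vocab_size=100, tgt_vocab_size=120)
src = torch.randint(0, 100, (7, 2))   # (src_len, batch_size)
tgt = torch.randint(0, 120, (5, 2))   # (tgt_len, batch_size)
with torch.no_grad():
    logits = tiny_model(src, tgt, None, None, None, None, None)  # no masks, just checking shapes
print(logits.shape)  # torch.Size([5, 2, 120])
```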
During training we need a subsequent-word mask that prevents the model from looking at future words when making predictions. We also need masks that hide the source and target padding tokens. The functions below take care of both.

```
def generate_square_subsequent_mask(sz):
    mask = (torch.triu(torch.ones((sz, sz), device=DEVICE)) == 1).transpose(0, 1)
    mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
    return mask


def create_mask(src, tgt):
    src_seq_len = src.shape[0]
    tgt_seq_len = tgt.shape[0]

    tgt_mask = generate_square_subsequent_mask(tgt_seq_len)
    src_mask = torch.zeros((src_seq_len, src_seq_len), device=DEVICE).type(torch.bool)

    src_padding_mask = (src == PAD_IDX).transpose(0, 1)
    tgt_padding_mask = (tgt == PAD_IDX).transpose(0, 1)
    return src_mask, tgt_mask, src_padding_mask, tgt_padding_mask
```

Note how the data is fed into the model.

```
torch.manual_seed(0)

EN_VOCAB_SIZE = tokenizer_en.get_vocab_size()
RU_VOCAB_SIZE = tokenizer_ru.get_vocab_size()
EMB_SIZE = 256
NHEAD = 8
FFN_HID_DIM = 512
NUM_ENCODER_LAYERS = 2
NUM_DECODER_LAYERS = 2

transformer = Seq2SeqTransformer(NUM_ENCODER_LAYERS, NUM_DECODER_LAYERS, EMB_SIZE,
                                 NHEAD, EN_VOCAB_SIZE, RU_VOCAB_SIZE, FFN_HID_DIM)
# the freshly constructed model is replaced by the pretrained one saved earlier
transformer = torch.load("./torch_weights/model")
```
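Another small sanity check (not from the original notebook): the shapes of the masks returned by `create_mask` for a dummy batch, with made-up lengths.

```
# Illustrative only: dummy token ids in the (seq_len, batch_size) layout used throughout.
dummy_src = torch.randint(0, EN_VOCAB_SIZE, (12, 4), device=DEVICE)
dummy_tgt = torch.randint(0, RU_VOCAB_SIZE, (9, 4), device=DEVICE)

src_mask, tgt_mask, src_padding_mask, tgt_padding_mask = create_mask(dummy_src, dummy_tgt)
print(src_mask.shape)          # torch.Size([12, 12]) - all False, the encoder may attend everywhere
print(tgt_mask.shape)          # torch.Size([9, 9])   - -inf above the diagonal, so no peeking at future tokens
print(src_padding_mask.shape)  # torch.Size([4, 12])  - True where the token equals PAD_IDX
print(tgt_padding_mask.shape)  # torch.Size([4, 9])
```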
---

### Homework starts here

Disclaimer: with this homework I would like to make up for the missed homeworks 6, 8 and 9. The remaining ones (most likely 10/11/12) I will try to hand in later.

---

```
from typing import *


def batch_encode(texts: List[str], max_len: int) -> Tuple[Tensor, Tensor]:
    encodings = tokenizer_en.encode_batch(texts)
    encodings = [
        [tokenizer_en.token_to_id('[CLS]')] + encoding.ids[:max_len] + [tokenizer_en.token_to_id('[SEP]')]
        for encoding in encodings
    ]
    outputs = [[tokenizer_ru.token_to_id('[CLS]')]] * len(texts)

    input_ids_pad = torch.nn.utils.rnn.pad_sequence(
        [torch.LongTensor(input_ids) for input_ids in encodings],
        batch_first=False,
        padding_value=PAD_IDX
    ).to(DEVICE)

    output_ids_pad = torch.nn.utils.rnn.pad_sequence(
        [torch.LongTensor(output_ids) for output_ids in outputs],
        batch_first=False,
        padding_value=PAD_IDX
    ).to(DEVICE)

    return input_ids_pad, output_ids_pad


SEP_IDX = tokenizer_ru.token_to_id("[SEP]")


def batch_decode(output_ids_pad: Tensor) -> List[str]:
    batch_size = output_ids_pad.shape[1]
    decode = []
    for sequence_idx in range(batch_size):
        sequence = output_ids_pad[:, sequence_idx].cpu().numpy()  # to ensure it is on cpu
        filtered_sequence = []
        for token_id in sequence:
            if token_id not in {PAD_IDX, SEP_IDX}:
                filtered_sequence.append(token_id)
            else:
                # found sep or pad, stopping
                break
        decode.append(tokenizer_ru.decode(filtered_sequence))
    return decode


def translate(texts: List[str], max_input_len: int = 30, max_output_len: int = 35):
    # now working with batches!
    input_ids_pad, output_ids_pad = batch_encode(texts, max_input_len)

    (texts_en_mask, texts_ru_mask,
     texts_en_padding_mask, texts_ru_padding_mask) = create_mask(input_ids_pad, output_ids_pad)

    logits = transformer(input_ids_pad, output_ids_pad,
                         texts_en_mask, texts_ru_mask,
                         texts_en_padding_mask, texts_ru_padding_mask, texts_en_padding_mask)
    pred = torch.softmax(logits, -1).argmax(-1)  # it needs softmaxing

    for i in range(max_output_len):
        output_ids_pad = torch.cat((output_ids_pad, pred))

        (texts_en_mask, texts_ru_mask,
         texts_en_padding_mask, texts_ru_padding_mask) = create_mask(input_ids_pad, output_ids_pad)

        logits = transformer(input_ids_pad, output_ids_pad,
                             texts_en_mask, texts_ru_mask,
                             texts_en_padding_mask, texts_ru_padding_mask, texts_en_padding_mask)
        # argmax over last token + unsqueeze to create seq_length dimension
        pred = torch.softmax(logits, -1).argmax(-1)[-1].unsqueeze(0)

    return batch_decode(output_ids_pad)


translate(["Example", "Also another cruel and super-evil megaexample"])

big_news_text = """
More than half of U.S. states have lowered some barriers to voting since the 2020 election, making permanent practices that helped produce record voter turnout during the coronavirus pandemic — a striking countertrend to the passage this year of restrictions in key Republican-controlled states. New laws in states from Vermont to California expand access to the voting process on a number of fronts, such as offering more options for early and mail voting, protecting mail ballots from being improperly rejected and making registering to vote easier. Some states restored voting rights to people with past felony convictions or expanded options for voters with disabilities, two long-standing priorities among voting advocates. And in Virginia, a new law requires localities to receive preapproval or feedback on voting changes as a shield against racial discrimination, a first for states after the Supreme Court struck down a key part of the federal Voting Rights Act in 2013. Kentucky Secretary of State Michael Adams, a Republican who fought for his state’s policy changes, said the GOP needs to “stop being scared of voters.” “Let them vote, and go out and make the case,” he said in an interview, adding: “I want Republicans to succeed. I think it’s an unforced error to shoot themselves in the foot in these states by shrinking access. You don’t need to do that.” Seventy-one new laws easing voting rules are poised to benefit 63 million eligible voters in 28 states, or about one-quarter of the U.S. voting population, according to the Voting Rights Lab report, which tracked policy changes as of June 13. Thirty-one new laws in 18 states create more barriers to the ballot box, affecting 36 million eligible voters, or 15 percent of the national voting population, the report stated. Legislative debates over restrictions are underway in key states such as Texas and Pennsylvania, leaving open the possibility that new limitations affecting millions more voters still will be enacted this year.
""" ## part of text from here: https://www.washingtonpost.com/politics/voting-rights-expansion-states/2021/06/22/1699a6b0-cf87-11eb-8014-2f3926ca24d9_story.html !pip install -q sentence-splitter from sentence_splitter import SentenceSplitter splitter = SentenceSplitter('en') def batch(iterable, n=1): l = len(iterable) for ndx in range(0, l, n): yield iterable[ndx:min(ndx + n, l)] # target function to translate big texts # works by splitting text into sentences and batched translation of those sentences def translate_big_text(text, batch_size: int = 8): sentences = splitter.split(text) translated_sentences = [] for sentences_batch in batch(sentences, n=batch_size): translated_sentences += translate(sentences_batch, 100, 100) return ".\n".join(translated_sentences) print(translate_big_text(big_news_text, 8)) ```
# Recreation of the algorithms from the paper

# One-dimensional elastic bar with crack (section 5.1):

### Parameters for the algorithm

```
from typing import Any, List, Tuple
```

Before starting with the problem, we set the general modeling parameters and material constants:

```
import torch

# General parameters for modeling the problem
time_steps = 1
num_points = 112
displacement_step = 0.0

# Initial crack
crack = 0.0
crack_width = 0.025

# Material constants
young_modulus = 1
poisson_ratio = 0.3  # typical for metal
# lame_lambda = young_modulus*poisson_ratio/((1 + poisson_ratio)*(1 - 2*poisson_ratio))
# lame_mu = young_modulus/(2*(1 + poisson_ratio))
lame_lambda = 0
lame_mu = 0.5
G_c = 0.5*crack_width


# Stress degradation function
def g(x: torch.Tensor) -> torch.Tensor:
    """Stress degradation function."""
    return (1-x)**2


# Finite differences
delta = 0.00001
```

### Generate Gauss points and visualization points:

```
import numpy as np

# As a convention, variables of type torch.Tensor are marked with the suffix _t whenever the type could be ambiguous.
visualization_points = np.linspace(-1.0, 1.0, num=num_points*3)
visualization_points_t: torch.Tensor = torch.tensor(visualization_points, dtype=torch.float).unsqueeze(dim=-1)

# The Gauss points and weights are generated separately for each region.
# They are generated on the reference interval [-1, 1] and then mapped to the actual region.

# Left side of the bar:
lc, lc_weights = np.polynomial.legendre.leggauss(112)
lc *= (1.0-crack_width)*0.5
lc += (crack_width-1.0)*0.5
lc_weights *= (1.0-crack_width)*0.5

# Crack region:
c, c_weights = np.polynomial.legendre.leggauss(112)
c *= crack_width*0.5
c_weights *= crack_width*0.5

# Right side of the bar:
rc, rc_weights = np.polynomial.legendre.leggauss(112)
rc *= (1.0-crack_width)*0.5
rc += (1.0-crack_width)*0.5
rc_weights *= (1.0-crack_width)*0.5

gauss_points = np.concatenate((lc, c, rc))
gauss_points_t: torch.Tensor = torch.tensor(gauss_points, dtype=torch.float).unsqueeze(dim=-1)
gauss_weights = np.concatenate((lc_weights, c_weights, rc_weights))
gauss_weights_t: torch.Tensor = torch.tensor(gauss_weights, dtype=torch.float).unsqueeze(dim=-1)
```
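As a quick illustrative check (not in the original notebook) we can inspect the composite quadrature rule before using it; no particular values are asserted here, this just shows what was built.

```
# Illustrative only: inspect the composite Gauss rule assembled above.
print(gauss_points.shape, gauss_weights.shape)   # 3 * 112 = 336 points and weights
print(gauss_points.min(), gauss_points.max())    # the range covered by the three regions
print(gauss_weights.sum())                       # total quadrature weight (combined length of the regions)
```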
### Initialize the neural network:

The phase-field and the displacement field are approximated with a fully connected neural network with three hidden layers of 50 neurons each. $\text{tanh}$ is used as the activation function, except for the last layer. In contrast to the paper, the sigmoid activation function $\sigma(x)=\frac{1}{1+e^{-x}}$ is applied to the output neuron representing the phase-field variable (since the phase field only takes values between 0 and 1).

```
import torch.nn as nn

# PINN architecture:
in_dim = 1
phase_field_dim = 1
dis_field_dim = 1
hidden_dim = 50
depth = 3


class PINN(nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.hidden_dim = hidden_dim
        self.depth = depth
        self.in_dim = in_dim
        self.out_dim = dis_field_dim + phase_field_dim
        self.fcs = nn.ModuleList([nn.Linear(self.hidden_dim, self.hidden_dim) for i in range(depth-1)])
        self.fcs.insert(0, nn.Linear(self.in_dim, self.hidden_dim))
        self.fcs.append(nn.Linear(self.hidden_dim, self.out_dim))
        # Xavier initialization:
        for fc in self.fcs:
            nn.init.xavier_uniform_(fc.weight)
        self.activation = nn.Tanh()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Args:
            x (torch.Tensor): shape [bs, self.in_dim]

        Returns:
            torch.Tensor: shape [bs, self.out_dim]
        """
        for fc in self.fcs[:-1]:
            x = self.activation(fc(x))
        x = self.fcs[-1](x)
        x[:, -1] = self.sigmoid(x[:, -1])
        return x


# Dirichlet boundary conditions
class U(nn.Module):
    def __init__(self, pinn: PINN = None):
        super().__init__()
        if pinn is None:
            self.pinn = PINN()
        else:
            self.pinn = pinn

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        ones = torch.ones(x.shape, device=device)
        y = self.pinn(x)
        u_hat, phi = y[..., :dis_field_dim], y[..., dis_field_dim:]
        return torch.cat(((x-ones)*(x+ones)*u_hat, phi), dim=-1)
```

### Defining the loss function:

Since the goal is to minimize the variational energy, the energy itself is used as the loss function of the PINN:

$\mathcal{L}=\mathcal{V}=\psi_e+\psi_c,$

where, matching the implementation below,

$\psi_e=\int_\Omega \big(g(\phi)\,\psi_0^+(\epsilon)+\psi_0^-(\epsilon)\big)\,d\Omega \quad\text{and}\quad \psi_c=\int_\Omega \Big(\tfrac{G_c}{2 l_0}\big(\phi^2+l_0^2|\nabla\phi|^2\big)+g(\phi)\,\psi_0^+(\epsilon)\Big)\,d\Omega.$

To calculate both energies, the Jacobians of both $u$ and $\phi$ are required. We approximate the Jacobian using finite differences. In the one-dimensional case several simplifications can be made to speed up the code. First,

$\epsilon=\frac{1}{2}(\nabla u +\nabla u^T)=\nabla u.$

Furthermore, since $\nabla u$ is a one-dimensional matrix, its single entry is at the same time its only eigenvalue. Finally, since there is only one eigenvalue,

$(\lambda_s-|\lambda_s|)^2=\sum_{i=1}^n(\lambda_i-|\lambda_i|)^2.$

It is also possible to calculate the exact Jacobian with `torch.autograd.functional.jacobian`, but this requires several backward passes through the neural network and is computationally much more expensive.
```
import time


def jacobian(f, x: torch.Tensor, h: float, mode: str = 'forward', y_1: torch.Tensor = None):
    # Precompute this tensor for faster results:
    h_t: torch.Tensor = torch.ones(x.shape, device=device)*h
    if mode == 'forward':
        if y_1 is None:
            y_1: torch.Tensor = f(x)            # shape [bs, out_dim]
        y_2: torch.Tensor = f(x + h_t)          # shape [bs, out_dim]
    elif mode == 'backward':
        if y_1 is None:
            y_1: torch.Tensor = f(x - h_t)      # shape [bs, out_dim]
            y_2: torch.Tensor = f(x)            # shape [bs, out_dim]
        else:
            y_2: torch.Tensor = y_1             # shape [bs, out_dim]
            y_1: torch.Tensor = f(x - h_t)      # shape [bs, out_dim]
    elif mode == 'central':
        y_1: torch.Tensor = f(x - 0.5*h_t)      # shape [bs, out_dim]
        y_2: torch.Tensor = f(x + 0.5*h_t)      # shape [bs, out_dim]
    else:
        print("Please enter a valid differentiation mode")
        return None
    return (y_2 - y_1)/h_t                      # shape [bs, out_dim]


def f_e(phi: torch.Tensor, nabla_u: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    eigenvalues: torch.Tensor = nabla_u.squeeze()  # shape [bs]
    psi_pos: torch.Tensor = (lame_lambda/8 + lame_mu/4)*(eigenvalues + torch.abs(eigenvalues))**2
    psi_neg: torch.Tensor = (lame_lambda/8 + lame_mu/4)*(eigenvalues - torch.abs(eigenvalues))**2
    return g(phi)*psi_pos + psi_neg, psi_pos


def f_c(phi: torch.Tensor, nabla_phi: torch.Tensor, psi_pos: torch.Tensor) -> torch.Tensor:
    nabla_phi: torch.Tensor = nabla_phi.squeeze()  # shape [bs]
    return G_c/(2*crack_width)*(phi**2 + (crack_width**2)*torch.abs(nabla_phi)**2) + g(phi)*psi_pos


def total_energy(u: U, x: torch.Tensor, verbose: bool = False, save_loss: bool = False,
                 fe_list: List[float] = None, fc_list: List[float] = None) -> torch.Tensor:
    t1: float = time.time()
    outputs: torch.Tensor = u(x)
    phi: torch.Tensor = outputs[..., dis_field_dim:]
    t2: float = time.time()
    nabla_pinn: torch.Tensor = jacobian(u, x, 0.0001, y_1=outputs)
    t3: float = time.time()
    nabla_u: torch.Tensor = nabla_pinn[:, :dis_field_dim]
    nabla_phi: torch.Tensor = nabla_pinn[:, dis_field_dim:]
    t4: float = time.time()
    strain_energy, psi_pos = f_e(phi, nabla_u)
    strain_energy = torch.sum(strain_energy*gauss_weights_t)
    t5: float = time.time()
    fracture_energy = torch.sum(f_c(phi, nabla_phi, psi_pos)*gauss_weights_t)
    t6: float = time.time()
    if verbose:
        print(f'U: {t2-t1} jac: {t3-t2} nabla: {t4-t3} f_e: {t5-t4} f_c: {t6-t5}')
    if save_loss:
        fe_list.append(strain_energy.detach().to('cpu').item())
        fc_list.append(fracture_energy.detach().to('cpu').item())
    return strain_energy + fracture_energy
```
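A small illustrative check of the energy split (not from the original notebook): with the material constants above, a purely tensile strain is fully degraded when $\phi=1$ and left untouched when $\phi=0$. The values in the comments follow from `lame_lambda = 0`, `lame_mu = 0.5`.

```
# Illustrative only: behaviour of the degradation function in the split energy.
phi_intact = torch.tensor([[0.0]])       # undamaged material
phi_broken = torch.tensor([[1.0]])       # fully damaged material
nabla_u_tension = torch.tensor([[0.5]])  # a positive (tensile) strain

print(f_e(phi_intact, nabla_u_tension))  # g(0)=1: energy (lame_lambda/8 + lame_mu/4) * (0.5 + 0.5)**2 = 0.125
print(f_e(phi_broken, nabla_u_tension))  # g(1)=0: the tensile part is degraded away, psi_neg is 0 here
```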
### Initializing the phase-field function:

In the paper the phase-field is initialized via the history function. Replicating this led to wrong results, however, so we implement an alternative way of initializing the phase-field: we create a `torch.Tensor` with the initial values of the phase-field function (1 inside the crack, 0 outside of it), which can be thought of as the labels in a neural-network context, and minimize the [binary cross entropy loss](https://pytorch.org/docs/stable/generated/torch.nn.BCELoss.html) between the output of the neural network and these initial values.

```
import os

device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

u: U = U().to(device)
optimizer = torch.optim.Adam(u.parameters())
init_loss = nn.BCELoss()

gauss_points_t: torch.Tensor = gauss_points_t.to(device)
gauss_weights_t: torch.Tensor = gauss_weights_t.to(device)

phi_values_t: torch.Tensor = torch.Tensor([1.0 if abs(x) <= crack_width else 0.0 for x in gauss_points]).unsqueeze(dim=-1)
phi_values_t: torch.Tensor = phi_values_t.to(device)

init_losses: List[float] = []


def init_train(u: U, optimizer, x: torch.Tensor, y: torch.Tensor, limit: float) -> None:
    print('Initializing phase field')
    loss: float = limit + 1.0
    while loss >= limit:
        optimizer.zero_grad()
        phi = u(x)[..., dis_field_dim:]
        loss_sum: torch.Tensor = init_loss(phi, y)
        loss_sum.backward()
        init_losses.append(loss_sum.detach().to("cpu").item())
        loss = init_losses[-1]
        print(f'Epoch {len(init_losses)} Loss {loss}')
        optimizer.step()


os.makedirs('./u', exist_ok=True)
path_u_pretrained: str = f'./u/pretrained.pt'

init_train(u, optimizer, gauss_points_t, phi_values_t, 0.1)
torch.save(u.state_dict(), path_u_pretrained)
```

We take a short look at the results of this training to make sure the phase-field is initialized correctly.

```
import matplotlib.pyplot as plt

plt.plot(init_losses)
plt.title('Phase field initialization')
plt.ylabel('loss')
plt.xlabel('epochs')
plt.show()

visualization_points_t: torch.Tensor = visualization_points_t.to(device)

model = U().to(device)
model.load_state_dict(torch.load(path_u_pretrained))
model.eval()

outputs = model(visualization_points_t)
phase_field = outputs[..., dis_field_dim:].squeeze().detach().to('cpu').numpy()

plt.plot(visualization_points, phase_field)
plt.title('Phase field')
plt.ylabel('phi(x)')
plt.xlabel('x')
plt.show()
```

### Training the neural network:

```
import os

device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

gauss_points_t: torch.Tensor = gauss_points_t.to(device)
gauss_weights_t: torch.Tensor = gauss_weights_t.to(device)

losses = []
fe_list: List[float] = []
fc_list: List[float] = []


def train(u: U, optimizer, loss, x: torch.Tensor, epochs: int) -> None:
    print('Training')
    for i in range(epochs):
        optimizer.zero_grad()
        # fe_list and fc_list must be passed as keyword arguments, since they follow save_loss=True
        loss_sum: torch.Tensor = loss(u, x, save_loss=True, fe_list=fe_list, fc_list=fc_list)
        loss_sum.backward()
        losses.append(loss_sum.detach().to("cpu").item())
        print(f'Epoch {i} Loss {losses[-1]}')
        optimizer.step()


def train_lbfgs(u: U, optimizer, loss, x: torch.Tensor, epochs: int) -> None:
    print('Training')
    for i in range(epochs):
        print(f'Epoch {i} ', end='')

        def closure():
            optimizer.zero_grad()
            loss_sum: torch.Tensor = loss(u, x, save_loss=True, fe_list=fe_list, fc_list=fc_list)
            loss_sum.backward()
            losses.append(loss_sum.detach().to("cpu").item())
            print(f'Loss {losses[-1]}')
            return loss_sum

        optimizer.step(closure)


os.makedirs('./u', exist_ok=True)
path_u_intermediate: str = f'./u/inter.pt'
path_u: str = f'./u/final.pt'

u = U().to(device)
u.load_state_dict(torch.load(path_u_pretrained))
optimizer = torch.optim.Adam(u.parameters())
train(u, optimizer, total_energy, gauss_points_t, 1500)
torch.save(u.state_dict(), path_u_intermediate)

u = U().to(device)
u.load_state_dict(torch.load(path_u_intermediate))
optimizer = torch.optim.LBFGS(u.parameters())
train_lbfgs(u, optimizer, total_energy, gauss_points_t, 50)
torch.save(u.state_dict(), path_u)
```
### Visualization

```
import matplotlib.pyplot as plt

visualization_points_t: torch.Tensor = visualization_points_t.to(device)

model = U().to(device)
model.load_state_dict(torch.load(path_u))
model.eval()

outputs = model(visualization_points_t)
dis_field = outputs[..., :dis_field_dim].squeeze().detach().to('cpu').numpy()
phase_field = outputs[..., dis_field_dim:].squeeze().detach().to('cpu').numpy()

plt.plot(visualization_points, phase_field)
plt.title('phase field phi')
plt.ylabel('phi(x)')
plt.xlabel('x')
plt.show()

plt.plot(visualization_points, dis_field)
plt.title('displacement field u')
plt.ylabel('u(x)')
plt.xlabel('x')
plt.show()
```

This result is in stark contrast to the paper. However, looking at the problem theoretically, we see that the result is correct and that there must be an error in our problem setup. It holds

$\psi_0^\pm=\frac{\lambda}{8}(\lambda_s\pm|\lambda_s|)^2+\frac{\mu}{4}\sum_{i=1}^{1}(\lambda_i\pm|\lambda_i|)^2,$

where $\lambda_i$ are the eigenvalues of the strain $\epsilon=\frac{1}{2}(\nabla u+\nabla u^T)$ and $\lambda_s$ denotes their sum. Setting $u(x)=c$ constant, we obtain $\lambda_1=0$ and $f_e(x)=g(\phi)\psi_0^+(\epsilon)+\psi_0^-(\epsilon)=0$ for all $x\in[-1,1]$. Clearly this minimizes the strain energy $\psi_e=\int_\Omega f_e(x)\,d\Omega.$ Furthermore,

$f_c(x)=\frac{G_c}{2 l_0}\left(\phi^2+l_0^2|\nabla\phi|^2\right)+g(\phi)\,\psi_0^+.$

The last term vanishes when $u(x)=c$. When the history function is used this cannot happen, because the last term becomes $g(\phi)H(x,t)$, and since $H(x,t)$ is monotonically increasing in time it is guaranteed not to vanish. But even with a history function it is still preferable for $\psi_0^+$ to be zero, as this minimizes $H(x,t)=\max\{\psi_0^+,H(x,t-1)\}.$

### Loss decrease

```
import matplotlib.pyplot as plt

plt.plot(losses)
plt.title('Total variational energy (loss) while training')
plt.ylabel('loss')
plt.xlabel('epochs')
plt.show()

plt.plot(fe_list)
plt.title('Strain energy while training')
plt.ylabel('loss')
plt.xlabel('epochs')
plt.show()

plt.plot(fc_list)
plt.title('Fracture energy while training')
plt.ylabel('loss')
plt.xlabel('epochs')
plt.show()
```
### Next try with the history function:

We implement the same algorithm again, but this time with the history function.

```
import time

H_init: torch.Tensor = torch.Tensor([1000.0 if abs(x) <= crack_width else 0.0 for x in gauss_points]).unsqueeze(dim=-1)


def f_c(phi: torch.Tensor, nabla_phi: torch.Tensor, psi_pos: torch.Tensor) -> torch.Tensor:
    nabla_phi: torch.Tensor = nabla_phi.squeeze()  # shape [bs]
    H: torch.Tensor = torch.max(psi_pos, H_init)
    return G_c/(2*crack_width)*(phi**2 + (crack_width**2)*torch.abs(nabla_phi)**2) + g(phi)*H


import os

device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

gauss_points_t: torch.Tensor = gauss_points_t.to(device)
gauss_weights_t: torch.Tensor = gauss_weights_t.to(device)
H_init: torch.Tensor = H_init.to(device)

losses = []
fe_list: List[float] = []
fc_list: List[float] = []


def train(u: U, optimizer, loss, x: torch.Tensor, epochs: int) -> None:
    print('Training')
    for i in range(epochs):
        optimizer.zero_grad()
        loss_sum: torch.Tensor = loss(u, x, save_loss=True, fe_list=fe_list, fc_list=fc_list)
        loss_sum.backward()
        losses.append(loss_sum.detach().to("cpu").item())
        print(f'Epoch {i} Loss {losses[-1]}')
        optimizer.step()


def train_lbfgs(u: U, optimizer, loss, x: torch.Tensor, epochs: int) -> None:
    print('Training')
    for i in range(epochs):
        print(f'Epoch {i} ', end='')

        def closure():
            optimizer.zero_grad()
            loss_sum: torch.Tensor = loss(u, x, save_loss=True, fe_list=fe_list, fc_list=fc_list)
            loss_sum.backward()
            losses.append(loss_sum.detach().to("cpu").item())
            print(f'Loss {losses[-1]}')
            return loss_sum

        optimizer.step(closure)


os.makedirs('./u', exist_ok=True)
path_u_intermediate: str = f'./u/inter.pt'
path_u: str = f'./u/final.pt'

u = U().to(device)
optimizer = torch.optim.Adam(u.parameters())
train(u, optimizer, total_energy, gauss_points_t, 1500)
torch.save(u.state_dict(), path_u_intermediate)

u = U().to(device)
u.load_state_dict(torch.load(path_u_intermediate))
optimizer = torch.optim.LBFGS(u.parameters())
train_lbfgs(u, optimizer, total_energy, gauss_points_t, 50)
torch.save(u.state_dict(), path_u)


import matplotlib.pyplot as plt

visualization_points_t: torch.Tensor = visualization_points_t.to(device)

model = U().to(device)
model.load_state_dict(torch.load(path_u))
model.eval()

outputs = model(visualization_points_t)
phase_field = outputs[..., dis_field_dim:].squeeze().detach().to('cpu').numpy()

plt.plot(visualization_points, phase_field)
plt.title('Phase field')
plt.ylabel('phi(x)')
plt.xlabel('x')
plt.show()

outputs = model(visualization_points_t)
dis_field = outputs[..., :dis_field_dim].squeeze().detach().to('cpu').numpy()
phase_field = outputs[..., dis_field_dim:].squeeze().detach().to('cpu').numpy()

plt.plot(visualization_points, dis_field)
plt.title('displacement field u')
plt.ylabel('u(x)')
plt.xlabel('x')
plt.show()

plt.plot(losses)
plt.title('Total variational energy (loss) while training')
plt.ylabel('loss')
plt.xlabel('epochs')
plt.show()

plt.plot(fe_list)
plt.title('Strain energy while training')
plt.ylabel('loss')
plt.xlabel('epochs')
plt.show()

plt.plot(fc_list)
plt.title('Fracture energy while training')
plt.ylabel('loss')
plt.xlabel('epochs')
plt.show()
```
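This notebook only simulates a single load step, so the history field never has to be updated. If several displacement steps were run, the history field would be carried over between steps roughly as in the sketch below; `psi_pos_step` is a stand-in for the tensile energy computed at a new load step, not a quantity from the notebook or the paper.

```
# Hypothetical multi-step history update (illustrative sketch only):
# H(x, t) = max{ psi_0^+(x, t), H(x, t-1) } can only grow over time.
H = H_init.clone()
psi_pos_step = torch.rand_like(H) * 2000.0  # stand-in for psi_0^+ at the next load step
H = torch.maximum(H, psi_pos_step)          # monotonically non-decreasing history field
print(H.min(), H.max())
```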
### Initialization test:

```
import os
import matplotlib.pyplot as plt

tries = 5

gauss_points_t: torch.Tensor = gauss_points_t.to(device)
gauss_weights_t: torch.Tensor = gauss_weights_t.to(device)


def train(u: U, optimizer, loss, x: torch.Tensor, epochs: int) -> List[float]:
    print('Training')
    losses = []
    for i in range(epochs):
        optimizer.zero_grad()
        loss_sum: torch.Tensor = loss(u, x)
        loss_sum.backward()
        losses.append(loss_sum.detach().to("cpu").item())
        print(f'Epoch {i} Loss {losses[-1]}')
        optimizer.step()
    return losses


losses_default = []
for i in range(tries):
    u: U = U().to(device)
    optimizer = torch.optim.Adam(u.parameters())
    print(f'Try {i}')

    os.makedirs('/init_test/u/', exist_ok=True)
    # path_pinn: str = f'.\\pinn\\ls_{i}.pt'
    path_u_intermediate: str = f'/init_test/u/default_try_{i}.pt'

    losses_default.append(train(u, optimizer, total_energy, gauss_points_t, 250))
    torch.save(u.state_dict(), path_u_intermediate)

for i in range(tries):
    plt.plot(losses_default[i])
    plt.title(f'Default initialization try {i}')
    plt.ylabel('loss')
    plt.xlabel('epochs')
    plt.show()


import os
import matplotlib.pyplot as plt

tries = 5

gauss_points_t: torch.Tensor = gauss_points_t.to(device)
gauss_weights_t: torch.Tensor = gauss_weights_t.to(device)


class PINN(nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.hidden_dim = hidden_dim
        self.depth = depth
        self.in_dim = in_dim
        self.out_dim = dis_field_dim + phase_field_dim
        self.fcs = nn.ModuleList([nn.Linear(self.hidden_dim, self.hidden_dim) for i in range(depth-1)])
        self.fcs.insert(0, nn.Linear(self.in_dim, self.hidden_dim))
        self.fcs.append(nn.Linear(self.hidden_dim, self.out_dim))
        # Xavier normal initialization:
        for fc in self.fcs:
            nn.init.xavier_normal_(fc.weight)
        self.activation = nn.Tanh()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Args:
            x (torch.Tensor): shape [bs, self.in_dim]

        Returns:
            torch.Tensor: shape [bs, self.out_dim]
        """
        for fc in self.fcs[:-1]:
            x = self.activation(fc(x))
        x = self.fcs[-1](x)
        return x


def train(u: U, optimizer, loss, x: torch.Tensor, epochs: int) -> List[float]:
    print('Training')
    losses = []
    for i in range(epochs):
        optimizer.zero_grad()
        loss_sum: torch.Tensor = loss(u, x)
        loss_sum.backward()
        losses.append(loss_sum.detach().to("cpu").item())
        print(f'Epoch {i} Loss {losses[-1]}')
        optimizer.step()
    return losses


losses_xavier_normal = []
for i in range(tries):
    u: U = U().to(device)
    optimizer = torch.optim.Adam(u.parameters())
    print(f'Try {i}')

    os.makedirs('/init_test/u', exist_ok=True)
    # path_pinn: str = f'.\\pinn\\ls_{i}.pt'
    path_u_intermediate: str = f'/init_test/u/xavier_normal_try_{i}.pt'

    losses_xavier_normal.append(train(u, optimizer, total_energy, gauss_points_t, 250))
    torch.save(u.state_dict(), path_u_intermediate)

for i in range(tries):
    plt.plot(losses_xavier_normal[i])
    plt.title(f'Xavier normal initialization try {i}')
    plt.ylabel('loss')
    plt.xlabel('epochs')
    plt.show()

import os
import matplotlib.pyplot as plt

tries = 5

gauss_points_t: torch.Tensor = gauss_points_t.to(device)
gauss_weights_t: torch.Tensor = gauss_weights_t.to(device)


class PINN(nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.hidden_dim = hidden_dim
        self.depth = depth
        self.in_dim = in_dim
        self.out_dim = dis_field_dim + phase_field_dim
        self.fcs = nn.ModuleList([nn.Linear(self.hidden_dim, self.hidden_dim) for i in range(depth-1)])
        self.fcs.insert(0, nn.Linear(self.in_dim, self.hidden_dim))
        self.fcs.append(nn.Linear(self.hidden_dim, self.out_dim))
        # Xavier uniform initialization:
        for fc in self.fcs:
            nn.init.xavier_uniform_(fc.weight)
        self.activation = nn.Tanh()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Args:
            x (torch.Tensor): shape [bs, self.in_dim]

        Returns:
            torch.Tensor: shape [bs, self.out_dim]
        """
        for fc in self.fcs[:-1]:
            x = self.activation(fc(x))
        x = self.fcs[-1](x)
        return x


def train(u: U, optimizer, loss, x: torch.Tensor, epochs: int) -> List[float]:
    print('Training')
    losses = []
    for i in range(epochs):
        optimizer.zero_grad()
        loss_sum: torch.Tensor = loss(u, x)
        loss_sum.backward()
        losses.append(loss_sum.detach().to("cpu").item())
        print(f'Epoch {i} Loss {losses[-1]}')
        optimizer.step()
    return losses


losses_xavier_uniform = []
for i in range(tries):
    u: U = U().to(device)
    optimizer = torch.optim.Adam(u.parameters())
    print(f'Try {i}')

    os.makedirs('/init_test/u', exist_ok=True)
    # path_pinn: str = f'.\\pinn\\ls_{i}.pt'
    path_u_intermediate: str = f'/init_test/u/xavier_uniform_try_{i}.pt'

    losses_xavier_uniform.append(train(u, optimizer, total_energy, gauss_points_t, 250))
    torch.save(u.state_dict(), path_u_intermediate)

for i in range(tries):
    plt.plot(losses_xavier_uniform[i])
    plt.title(f'Xavier uniform initialization try {i}')
    plt.ylabel('loss')
    plt.xlabel('epochs')
    plt.show()
```

### Trying different learning rates

```
import os
import matplotlib.pyplot as plt

lrs = [0.01, 0.0001]

gauss_points_t: torch.Tensor = gauss_points_t.to(device)
gauss_weights_t: torch.Tensor = gauss_weights_t.to(device)


class PINN(nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.hidden_dim = hidden_dim
        self.depth = depth
        self.in_dim = in_dim
        self.out_dim = dis_field_dim + phase_field_dim
        self.fcs = nn.ModuleList([nn.Linear(self.hidden_dim, self.hidden_dim) for i in range(depth-1)])
        self.fcs.insert(0, nn.Linear(self.in_dim, self.hidden_dim))
        self.fcs.append(nn.Linear(self.hidden_dim, self.out_dim))
        # Xavier initialization:
        for fc in self.fcs:
            nn.init.xavier_uniform_(fc.weight)
        self.activation = nn.Tanh()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Args:
            x (torch.Tensor): shape [bs, self.in_dim]

        Returns:
            torch.Tensor: shape [bs, self.out_dim]
        """
        for fc in self.fcs[:-1]:
            x = self.activation(fc(x))
        x = self.fcs[-1](x)
        return x


losses = []


def train(u: U, optimizer, loss, x: torch.Tensor, epochs: int) -> List[float]:
    print('Training')
    losses = []
    for i in range(epochs):
        optimizer.zero_grad()
        loss_sum: torch.Tensor = loss(u, x)
        loss_sum.backward()
        losses.append(loss_sum.detach().to("cpu").item())
        print(f'Epoch {i} Loss {losses[-1]}')
        optimizer.step()
    return losses


def train_lbfgs(u: U, optimizer, loss, x: torch.Tensor, epochs: int) -> None:
    print('Training')
    for i in range(epochs):
        print(f'Epoch {i} ', end='')

        def closure():
            optimizer.zero_grad()
            loss_sum: torch.Tensor = loss(u, x)
            loss_sum.backward()
            losses.append(loss_sum.detach().to("cpu").item())
            print(f'Loss {losses[-1]}')
            return loss_sum

        optimizer.step(closure)


for lr in lrs:
    print(f'Learning rate {lr}')

    os.makedirs('lr_test/u', exist_ok=True)
    path_u: str = f'lr_test/u/lr_{str(lr)}.pt'
    path_u_intermediate: str = f'lr_test/u/lr_{str(lr)}_inter.pt'

    u: U = U().to(device)
    optimizer = torch.optim.Adam(u.parameters(), lr=lr)
    train(u, optimizer, total_energy, gauss_points_t, 1000)
    torch.save(u.state_dict(), path_u_intermediate)

    u = U().to(device)
    u.load_state_dict(torch.load(path_u_intermediate))
    optimizer = torch.optim.LBFGS(u.parameters())
    train_lbfgs(u, optimizer, total_energy, gauss_points_t, 100)
    torch.save(u.state_dict(), path_u)
    # H_arrays.append(H_updates[-1])  # leftover from another experiment: H_arrays and H_updates are not defined in this notebook

visualization_points_t: torch.Tensor = visualization_points_t.to(device)
for lr in lrs:
    model = U().to(device)
    model.load_state_dict(torch.load(f'lr_test/u/lr_{str(lr)}.pt'))
    model.eval()

    outputs = model(visualization_points_t)
    dis_field = outputs[..., :dis_field_dim].squeeze().detach().to('cpu').numpy()
    phase_field = outputs[..., dis_field_dim:].squeeze().detach().to('cpu').numpy()

    plt.plot(visualization_points, phase_field)
    plt.title('phase field phi')
    plt.ylabel('phi(x)')
    plt.xlabel('x')
    plt.show()
```
[] for i in range(tries): u: U = U().to(device) optimizer = torch.optim.Adam(u.parameters()) print(f'Try {i}') os.makedirs('/init_test/u', exist_ok=True) # path_pinn: str = f'.\\pinn\\ls_{i}.pt' path_u_intermediate: str = f'/init_test/u/xavier_uniform_try_{i}.pt' losses_xavier_uniform.append(train(u, optimizer, loss, gauss_points_tensor, 250)) torch.save(u.state_dict(), path_u_intermediate) for i in range(tries): plt.plot(losses_xavier_uniform[i]) plt.title(f'Xavier uniform initialization try {i}') plt.ylabel('loss') plt.xlabel('epochs') plt.show() import os import matplotlib.pyplot as plt lrs = [0.01, 0.0001] gauss_points_tensor: torch.Tensor = gauss_points_tensor.to(device) weights_tensor: torch.Tensor = weights_tensor.to(device) class PINN(nn.Module): def __init__(self) -> None: super().__init__() self.hidden_dim = hidden_dim self.depth = depth self.in_dim = in_dim self.out_dim = dis_field_dim + phase_field_dim self.fcs = nn.ModuleList([nn.Linear(self.hidden_dim, self.hidden_dim) for i in range(depth-1)]) self.fcs.insert(0, nn.Linear(self.in_dim, self.hidden_dim)) self.fcs.append(nn.Linear(self.hidden_dim, self.out_dim)) # Xavier initialization: for fc in self.fcs: nn.init.xavier_uniform_(fc.weight) self.activation = nn.Tanh() def forward(self, x: torch.Tensor) -> torch.Tensor: """ Args: x (torch.Tensor): shape [bs, self.in_dim] Returns: torch.Tensor: shape [bs, self.out_dim] """ for fc in self.fcs[:-1]: x = self.activation(fc(x)) x = self.fcs[-1](x) return x losses = [] def train(u: U, optimizer, loss, x: torch.Tensor, epochs: int) -> None: print('Training') losses = [] for i in range(epochs): optimizer.zero_grad() loss_sum: torch.Tensor = loss(u, x) loss_sum.backward() losses.append(loss_sum.detach().to("cpu").item()) print(f'Epoch {i} Loss {losses[-1]}') optimizer.step() return losses def train_lbfgs(u: U, optimizer, loss, x: torch.Tensor, epochs: int) -> None: print('Training') for i in range(epochs): print(f'Epoch {i} ', end='') def closure(): optimizer.zero_grad() loss_sum: torch.Tensor = loss(u, x) loss_sum.backward() losses.append(loss_sum.detach().to("cpu").item()) print(f'Loss {losses[-1]}') return loss_sum optimizer.step(closure) for lr in lrs: print(f'Learning rate {lr}') os.makedirs('lr_test/u', exist_ok=True) path_u: str = f'lr_test/u/lr_{str(lr)}.pt' path_u_intermediate: str = f'lr_test/u/lr_{str(lr)}_inter.pt' u: U = U().to(device) optimizer = torch.optim.Adam(u.parameters(), lr=lr) train(u, optimizer, loss, gauss_points_tensor, 1000) torch.save(u.state_dict(), path_u_intermediate) u = U().to(device) u.load_state_dict(torch.load(path_u_intermediate)) optimizer = torch.optim.LBFGS(u.parameters()) train_lbfgs(u, optimizer, loss, gauss_points_tensor, 100) torch.save(u.state_dict(), path_u) H_arrays.append(H_updates[-1]) visualisation_points_tensor: torch.Tensor = visualisation_points_tensor.to(device) for lr in lrs: model = U().to(device) model.load_state_dict(torch.load(f'lr_test/u/lr_{str(lr)}.pt')) model.eval() outputs = model(visualisation_points_tensor) dis_field = outputs[...,:dis_field_dim].squeeze().detach().to('cpu').numpy() phase_field = outputs[...,dis_field_dim:].squeeze().detach().to('cpu').numpy() plt.plot(visualisation_points, phase_field) plt.title('phase field phi') plt.ylabel('phi(x)') plt.xlabel('x') plt.show()
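The phase-field code above differentiates the network with a hand-rolled finite-difference `jacobian` helper rather than autograd. As a quick, standalone sanity check (everything below is illustrative — `f`, `x` and `h` are hypothetical and nothing is taken from the notebook), a central difference of the same form can be compared against `torch.autograd`:

```
import torch

# Hypothetical smooth test function (not from the notebook), shape [bs, 1] -> [bs, 1]
def f(x: torch.Tensor) -> torch.Tensor:
    return torch.sin(x) + x ** 2

x = torch.linspace(-1.0, 1.0, 5, dtype=torch.float64).unsqueeze(-1).requires_grad_(True)
h = 1e-4

# central difference, mirroring the 'central' branch of the jacobian helper above
fd = (f(x + 0.5 * h) - f(x - 0.5 * h)) / h

# autograd reference at the same points
ad, = torch.autograd.grad(f(x).sum(), x)

# both estimates of df/dx should agree closely (truncation error ~ h**2)
print(torch.max(torch.abs(fd - ad)).item())
```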
```
!pip install pulp
from pulp import *

pulp.LpVariable?

mi_lp_problema = pulp.LpProblem("Mi LP Problema", pulp.LpMinimize)

x = pulp.LpVariable('x', lowBound=0, cat='Continuous')
y = pulp.LpVariable('y', lowBound=0, cat='Continuous')

# Objective function
mi_lp_problema += x + 2*y

# Constraints
mi_lp_problema += 3*x + 4*y >= 1
mi_lp_problema += 2*x + 5*y >= 2

mi_lp_problema

mi_lp_problema.solve()
for variable in mi_lp_problema.variables():
    print("{} = {}".format(variable.name, variable.varValue))

!pip install cvxopt
from cvxopt import *

A = matrix([[-3.0, -2.0, -1.0, 0.0],
            [-4.0, -5.0, 0.0, -1.0]])
b = matrix([-1.0, -2.0, 0.0, 0.0])
c = matrix([1.0, 2.0])
sol = solvers.lp(c, A, b)
print(sol['x'])

"""
minimize    2x1 + x2
subject to  -x1 +  x2 <= 1
             x1 +  x2 >= 2
                   x2 >= 0
             x1 - 2x2 <= 4
"""
A = matrix([[-1.0, -1.0, 0.0, 1.0],
            [1.0, -1.0, -1.0, -2.0]])
b = matrix([1.0, -2.0, 0.0, 4.0])
c = matrix([2.0, 1.0])
sol = solvers.lp(c, A, b)
print(sol['x'])

from scipy.optimize import linprog
"""
minimize    2x1 + x2
subject to  -x1 +  x2 <= 1
             x1 +  x2 >= 2
                   x2 >= 0
             x1 - 2x2 <= 4
"""
c = [2, 1]
A = [[-1, 1], [-1, -1], [1, -2]]
b = [1, -2, 4]
x0_bounds = (0, None)
x1_bounds = (0, None)
res = linprog(c, A_ub=A, b_ub=b, bounds=(x0_bounds, x1_bounds), options={"disp": True})

import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import odeint
%matplotlib inline

k = 3.0  # spring constant
m = 1.0
B = 0.5  # damping constant

def armonico(variables, t):
    x, y = variables
    return [y, -k*x/m - B/m*y]

inicial = [0.8, 0]  # initial position and velocity: x(t=0)=0.8 [m], y(t=0)=0.0 [m/s]
tiempo = np.arange(0, 20, 0.01)
# the system is solved with odeint(system, initial conditions, time range to plot over)
resultado = odeint(armonico, inicial, tiempo)
xx, yy = resultado.T  # extract position and velocity
plt.plot(tiempo, xx, c='r', label='Posicion')
plt.plot(tiempo, yy, c='b', label='Velocidad')
plt.legend(loc='best', prop={'size': 14})
plt.xlabel('tiempo', fontsize=14)

Omega = k/m
Omega

from ipywidgets import *

def amortiguado(k=1, m=4.1, B=0.5):
    def armonico(variables, t):
        x, y = variables
        return [y, -k*x/m - B/m*y]
    inicial = [0.8, 0]  # initial conditions x(t=0)=0.8 [m], y(t=0)=0.0 [m/s]
    tiempo = np.arange(0, 20, 0.01)
    resultado = odeint(armonico, inicial, tiempo)
    xx, yy = resultado.T  # extract position and velocity
    plt.plot(tiempo, xx, c='r', label='Posicion')
    plt.plot(tiempo, yy, c='b', label='Velocidad')
    plt.legend(loc='best', prop={'size': 14})
    plt.xlabel('tiempo', fontsize=14)
    plt.show()

interact_manual(amortiguado, k=(0, 10, 0.1), m=(0, 10, 0.1), B=(0, 10, 0.1))

def crecimiento(r=1):
    def poblacion(x, t):  # logistic growth; x is the population fraction
        return r*x*(1 - x)
    inicial = 0.05
    tiempo = np.linspace(0, 10)
    xx = odeint(poblacion, inicial, tiempo)
    plt.plot(tiempo, xx, c='r', label='Poblacion')
    plt.legend(loc='best', prop={'size': 14})
    plt.xlabel('tiempo', fontsize=14)
    plt.show()

interact_manual(crecimiento, r=(0, 10, 0.1))

# controlling a differential-drive robot
def trayectoria(R=0.5, L=1, Vr=1, Vl=1):
    def sistema(variables, t):
        x, y, phi = variables
        return [(R/2)*(Vr+Vl)*np.cos(phi), (R/2)*(Vr+Vl)*np.sin(phi), R/L*(Vr-Vl)]
    inicial = [0, 0, 0]
    tiempo = np.arange(0, 5, 0.01)
    resultado = odeint(sistema, inicial, tiempo)
    xx, yy = resultado[:, 0], resultado[:, 1]
    plt.plot(xx, yy, c='r')
    plt.show()

interact_manual(trayectoria, R=(0, 5, 0.1), L=(0, 5, 0.1), Vr=(0, 5, 0.1), Vl=(0, 5, 0.1))
# webots
```
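The `linprog` call above stores its output in `res` but never inspects it. As a short follow-up (these are standard fields of scipy's `OptimizeResult`, not part of the original cell), the optimum can be read off like this:

```
# Inspect the scipy result object created above.
print("status :", res.status, res.message)
print("x      :", res.x)    # optimal [x1, x2]
print("cost   :", res.fun)  # optimal objective value 2*x1 + x2
```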
``` # !pip install PytorchCML import sys sys.path.append("../../src/") from itertools import product from PytorchCML import losses, models, samplers, evaluators, trainers import torch from torch import nn, optim import numpy as np import pandas as pd from sklearn.model_selection import train_test_split from sklearn.decomposition import TruncatedSVD from scipy.sparse import csr_matrix def svd_init(X, dim): """ Args : X : csr_matrix which element is 0 or 1. dim : number of dimention """ svd = TruncatedSVD(n_components=10) U_ = svd.fit_transform(X) V_ = svd.components_ s = (U_.sum(axis=1).mean() + V_.sum(axis=0).mean()) / 2 U = 2 ** 0.5 * U_ - (1 / n_dim) ** 0.5 * s * np.ones_like(U_) V = 2 ** 0.5 * V_ + (1 / n_dim) ** 0.5 / s * np.ones_like(V_) ub = -(2 / n_dim) ** 0.5 * U_.sum(axis=1) / s vb = (2 / n_dim) ** 0.5 * V_.sum(axis=0) * s return U, V, ub, vb movielens = pd.read_csv( 'http://files.grouplens.org/datasets/movielens/ml-100k/u.data', sep='\t', header=None, index_col=None, ) movielens.columns = ["user_id", "item_id", "rating", "timestamp"] movielens.user_id -= 1 movielens.item_id -= 1 movielens.rating = (movielens.rating >= 4).astype(int) n_user = movielens.user_id.nunique() n_item = movielens.item_id.nunique() train, test = train_test_split(movielens.copy()) # all user item pairs df_all = pd.DataFrame( [[u, i] for u,i in product(range(n_user), range(n_item))], columns=["user_id", "item_id"] ) # frag train pairs df_all = pd.merge( df_all, train[["user_id", "item_id", "rating"]], on=["user_id", "item_id"], how="left" ) # remove train pairs test = pd.merge( df_all[df_all.rating.isna()][["user_id", "item_id"]], test[["user_id", "item_id", "rating"]], on=["user_id", "item_id"], how="left" ).fillna(0) # numpy array train_set = train[train.rating == 1][["user_id", "item_id"]].values test_set = test[["user_id", "item_id", "rating"]].values # to torch.Tensor device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") train_set = torch.LongTensor(train_set).to(device) test_set = torch.LongTensor(test_set).to(device) n_dim = 10 X = csr_matrix( (np.ones(train_set.shape[0]), (train_set[:,0], train_set[:,1])), shape=[n_user, n_item] ) U, V, ub, vb = svd_init(X, n_dim) ``` # Naive MF ``` device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") lr = 1e-3 n_dim = 10 model = models.LogitMatrixFactorization( n_user, n_item, n_dim, max_norm=5,max_bias=3, user_embedding_init = torch.Tensor(U), item_embedding_init = torch.Tensor(V.T), user_bias_init = torch.Tensor(ub), item_bias_init = torch.Tensor(vb) ).to(device) optimizer = optim.Adam(model.parameters(), lr=lr) criterion = losses.LogitPairwiseLoss().to(device) sampler = samplers.BaseSampler(train_set, n_user, n_item, device=device,n_neg_samples=5, batch_size=1024) score_function_dict = { "nDCG" : evaluators.ndcg, "MAP" : evaluators.average_precision, "Recall": evaluators.recall } evaluator = evaluators.UserwiseEvaluator(torch.LongTensor(test_set).to(device), score_function_dict, ks=[3]) trainer = trainers.MFTrainer(model, optimizer, criterion, sampler) trainer.fit(n_batch=50, n_epoch=15, valid_evaluator = evaluator, valid_per_epoch=5) trainer.valid_scores ``` # RelMF ``` train["popularity"] = train.groupby("item_id").rating.transform(sum) train["pscore"] = 1 / (train.popularity / train.popularity.max()) ** 0.5 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") lr = 1e-3 n_dim = 10 train_set = train[train.rating == 1][["user_id", "item_id", "pscore"]].values train_set = 
torch.LongTensor(train_set).to(device) model = models.LogitMatrixFactorization( n_user, n_item, n_dim, max_norm=5,max_bias=3, user_embedding_init = torch.Tensor(U), item_embedding_init = torch.Tensor(V.T), user_bias_init = torch.Tensor(ub), item_bias_init = torch.Tensor(vb) ).to(device) optimizer = optim.Adam(model.parameters(), lr=lr) criterion = losses.RelevancePairwiseLoss(delta="rmse").to(device) sampler = samplers.BaseSampler(train_set, n_user, n_item, device=device,n_neg_samples=5, batch_size=1024) score_function_dict = { "nDCG" : evaluators.ndcg, "MAP" : evaluators.average_precision, "Recall": evaluators.recall } evaluator = evaluators.UserwiseEvaluator(torch.LongTensor(test_set).to(device), score_function_dict, ks=[3]) trainer = trainers.MFTrainer( model, optimizer, criterion, sampler, column_names={"user_id":0, "item_id":1, "pscore":2} ) trainer.fit(n_batch=50, n_epoch=15, valid_evaluator = evaluator, valid_per_epoch=5) trainer.valid_scores ```
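The RelMF section above reweights positive interactions by an inverse propensity derived from item popularity. Below is a minimal, standalone sketch of that weighting on a hypothetical toy interaction table (plain pandas only; nothing here touches the PytorchCML API):

```
import pandas as pd

# Toy interactions (made up for illustration only)
toy = pd.DataFrame({
    "user_id": [0, 0, 1, 1, 2, 2, 2],
    "item_id": [0, 1, 0, 2, 0, 1, 2],
    "rating":  [1, 1, 1, 0, 1, 1, 1],
})

# Same recipe as above: propensity ~ (item popularity / max popularity) ** 0.5,
# and the training weight is its inverse, so rarely-seen items get larger weights.
toy["popularity"] = toy.groupby("item_id").rating.transform("sum")
toy["pscore"] = 1 / (toy.popularity / toy.popularity.max()) ** 0.5
print(toy.sort_values("item_id"))
```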
``` import pandas as pd import numpy as np import regex as re import nltk nltk.download('wordnet') nltk.download('punkt') nltk.download('stopwords') from nltk.corpus import stopwords import tensorflow as tf from tensorflow.python.keras.preprocessing import sequence from tensorflow.python.keras.preprocessing import text from sklearn.model_selection import train_test_split from keras.models import Sequential from keras.layers import Dense, Embedding, LSTM,Bidirectional, GlobalMaxPool1D,Dropout,Flatten from keras.models import load_model from keras.callbacks import ModelCheckpoint data = pd.read_csv('final_texts.csv',index_col=0) data.reset_index(drop=True,inplace = True) for i,j in enumerate(data['labels']): try: j = int(j) except ValueError as e: print(f'error on {i} line') data.drop(labels=[2478],inplace= True) data['labels'].astype(int); data.dropna(inplace = True) cleaned_text = [] for text in data['texts']: text = " ".join(word for word in text.split() if not word.isdigit()) cleaned_text.append(text) data['cleaned_text'] = cleaned_text vocab = {} for text in data['cleaned_text']: sen = text.split() for word in sen: try: vocab[word] += 1 except KeyError: vocab[word] = 1 vocab = dict(sorted(vocab.items(), key=lambda item: item[1])) rare_words = [] for key,value in vocab.items(): if value<=10: rare_words.append(key) stopwords_en = set(stopwords.words('english')) cleaner_text = [] for text in data['cleaned_text']: text = " ".join([word for word in text.split() if len(word)>2 and word not in stopwords_en and word not in rare_words]) cleaner_text.append(text) data['final_text'] = cleaner_text vocab = {} for text in data['final_text']: sen = text.split() for word in sen: try: vocab[word] += 1 except KeyError: vocab[word] = 1 vocab = dict(sorted(vocab.items(), key=lambda item: item[1])) vocab_list = list(vocab.items()) vocab_size = len(vocab) x = data['final_text'].values y = data['labels'].values X_train,X_test,y_train, y_test = train_test_split(x,y, test_size = 0.2, shuffle = True) y_train = y_train.astype(int) y_test = y_test.astype(int) embeddings_index = dict() f = open('glove.twitter.27B.200d.txt') for line in f: values = line.split() word = values[0] coefs = np.asarray(values[1:], dtype='float32') embeddings_index[word] = coefs f.close() print('Loaded %s word vectors.' 
% len(embeddings_index)) from tensorflow.python.keras.preprocessing import sequence from tensorflow.python.keras.preprocessing import text tokenizer = text.Tokenizer() tokenizer.fit_on_texts(X_train) X_train = tokenizer.texts_to_sequences(X_train) X_test = tokenizer.texts_to_sequences(X_test) X_train = sequence.pad_sequences(X_train,maxlen=200) X_test = sequence.pad_sequences(X_test,maxlen=200) tokens = len(tokenizer.word_index) + 2 embedding_matrix = np.zeros((tokens, 200)) count = 0 unknown = [] for word, i in tokenizer.word_index.items(): embedding_vector = embeddings_index.get(word) if embedding_vector is not None: try: embedding_matrix[i] = embedding_vector except ValueError: unknown.append(word) count += 1 else: unknown.append(word) count += 1 print(1-(count/vocab_size)) model = Sequential() model.add(Embedding(tokens,200,weights = [embedding_matrix],input_length = embedding_matrix.shape[1])) model.add(LSTM(64)) model.add(Flatten()) model.add(Dense(1,activation='sigmoid')) model.compile(optimizer='adam',loss = 'binary_crossentropy',metrics=['accuracy']) print(model.summary()) model.fit(X_train,y_train,epochs=30) loss,accuracy = model.evaluate(X_train,y_train) print(f'acc: {accuracy}') predictions = model.predict(X_test) predictions = np.round(predictions) from sklearn.metrics import accuracy_score, confusion_matrix score = accuracy_score(y_test,predictions) cm = confusion_matrix(y_test,predictions) print("score: {} cm: {}".format(score,cm)) ```
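At inference time, new text has to pass through the same `tokenizer` and 200-token padding used for training above. A small sketch reusing the objects already defined in this notebook (the two input sentences are made up):

```
# Hypothetical new inputs; preprocessing must mirror the training pipeline above.
new_texts = ["this is a sample review", "another example sentence"]

seqs = tokenizer.texts_to_sequences(new_texts)
seqs = sequence.pad_sequences(seqs, maxlen=200)  # same maxlen as the training data

probs = model.predict(seqs)             # sigmoid outputs in [0, 1]
labels = (probs > 0.5).astype(int)      # binary labels at a 0.5 threshold
print(labels.ravel())
```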
``` #loading need libraries import numpy as np import seaborn as sns import pandas as pd import matplotlib.pyplot as plt from scipy import stats %matplotlib inline #Load data for train and test train = pd.read_csv('../input/train.csv') train.head() #shape of train data train.shape #you can also check the data set information using the info() command. train.info() ``` ### Target variable Some analysis on target variable ``` plt.subplots(figsize=(12,9)) sns.distplot(train['SalePrice'], fit=stats.norm) # Get the fitted parameters used by the function (mu, sigma) = stats.norm.fit(train['SalePrice']) # plot with the distribution plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)], loc='best') plt.ylabel('Frequency') #Probablity plot fig = plt.figure() stats.probplot(train['SalePrice'], plot=plt) plt.show() ``` This target varibale is right skewed. Now, we need to tranform this variable and make it normal distribution. For more information about that [click here](http://whatis.techtarget.com/definition/skewness) #### Here we use log for target variable to make more normal distribution ``` #we use log function which is in numpy train['SalePrice'] = np.log1p(train['SalePrice']) #Check again for more normal distribution plt.subplots(figsize=(12,9)) sns.distplot(train['SalePrice'], fit=stats.norm) # Get the fitted parameters used by the function (mu, sigma) = stats.norm.fit(train['SalePrice']) # plot with the distribution plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)], loc='best') plt.ylabel('Frequency') #Probablity plot fig = plt.figure() stats.probplot(train['SalePrice'], plot=plt) plt.show() ``` ### Check the missing values ``` #Let's check if the data set has any missing values. train.columns[train.isnull().any()] #plot of missing value attributes plt.figure(figsize=(12, 6)) sns.heatmap(train.isnull()) plt.show() #missing value counts in each of these columns Isnull = train.isnull().sum()/len(train)*100 Isnull = Isnull[Isnull>0] Isnull.sort_values(inplace=True, ascending=False) Isnull ``` ### Visualising missing values ``` #Convert into dataframe Isnull = Isnull.to_frame() Isnull.columns = ['count'] Isnull.index.names = ['Name'] Isnull['Name'] = Isnull.index #plot Missing values plt.figure(figsize=(13, 5)) sns.set(style='whitegrid') sns.barplot(x='Name', y='count', data=Isnull) plt.xticks(rotation = 90) plt.show() ``` ### Corralation between train attributes ``` #Separate variable into new dataframe from original dataframe which has only numerical values #there is 38 numerical attribute from 81 attributes train_corr = train.select_dtypes(include=[np.number]) train_corr.shape #Delete Id because that is not need for corralation plot del train_corr['Id'] #Coralation plot corr = train_corr.corr() plt.subplots(figsize=(20,9)) sns.heatmap(corr, annot=True) ``` #### Top 50% Corralation train attributes with sale-price ``` top_feature = corr.index[abs(corr['SalePrice']>0.5)] plt.subplots(figsize=(12, 8)) top_corr = train[top_feature].corr() sns.heatmap(top_corr, annot=True) plt.show() ``` Here OverallQual is highly correlated with target feature of saleprice by 82% ``` #unique value of OverallQual train.OverallQual.unique() sns.barplot(train.OverallQual, train.SalePrice) #boxplot plt.figure(figsize=(18, 8)) sns.boxplot(x=train.OverallQual, y=train.SalePrice) col = ['SalePrice', 'OverallQual', 'GrLivArea', 'GarageCars', 'TotalBsmtSF', 'FullBath', 'TotRmsAbvGrd', 'YearBuilt'] sns.set(style='ticks') sns.pairplot(train[col], size=3, 
kind='reg') print("Find most important features relative to target") corr = train.corr() corr.sort_values(['SalePrice'], ascending=False, inplace=True) corr.SalePrice ``` ### Imputting missing values ``` # PoolQC has missing value ratio is 99%+. So, there is fill by None train['PoolQC'] = train['PoolQC'].fillna('None') #Arround 50% missing values attributes have been fill by None train['MiscFeature'] = train['MiscFeature'].fillna('None') train['Alley'] = train['Alley'].fillna('None') train['Fence'] = train['Fence'].fillna('None') train['FireplaceQu'] = train['FireplaceQu'].fillna('None') #Group by neighborhood and fill in missing value by the median LotFrontage of all the neighborhood train['LotFrontage'] = train.groupby("Neighborhood")["LotFrontage"].transform( lambda x: x.fillna(x.median())) #GarageType, GarageFinish, GarageQual and GarageCond these are replacing with None for col in ['GarageType', 'GarageFinish', 'GarageQual', 'GarageCond']: train[col] = train[col].fillna('None') #GarageYrBlt, GarageArea and GarageCars these are replacing with zero for col in ['GarageYrBlt', 'GarageArea', 'GarageCars']: train[col] = train[col].fillna(int(0)) #BsmtFinType2, BsmtExposure, BsmtFinType1, BsmtCond, BsmtQual these are replacing with None for col in ('BsmtFinType2', 'BsmtExposure', 'BsmtFinType1', 'BsmtCond', 'BsmtQual'): train[col] = train[col].fillna('None') #MasVnrArea : replace with zero train['MasVnrArea'] = train['MasVnrArea'].fillna(int(0)) #MasVnrType : replace with None train['MasVnrType'] = train['MasVnrType'].fillna('None') #There is put mode value train['Electrical'] = train['Electrical'].fillna(train['Electrical']).mode()[0] #There is no need of Utilities train = train.drop(['Utilities'], axis=1) #Checking there is any null value or not plt.figure(figsize=(10, 5)) sns.heatmap(train.isnull()) ``` ## Now, there is no any missing values #### Encoding str to int ``` cols = ('FireplaceQu', 'BsmtQual', 'BsmtCond', 'GarageQual', 'GarageCond', 'ExterQual', 'ExterCond','HeatingQC', 'PoolQC', 'KitchenQual', 'BsmtFinType1', 'BsmtFinType2', 'Functional', 'Fence', 'BsmtExposure', 'GarageFinish', 'LandSlope', 'LotShape', 'PavedDrive', 'Street', 'Alley', 'CentralAir', 'MSSubClass', 'OverallCond', 'YrSold', 'MoSold', 'MSZoning', 'LandContour', 'LotConfig', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'MasVnrArea', 'Foundation', 'GarageType', 'MiscFeature', 'SaleType', 'SaleCondition', 'Electrical', 'Heating') from sklearn.preprocessing import LabelEncoder for c in cols: lbl = LabelEncoder() lbl.fit(list(train[c].values)) train[c] = lbl.transform(list(train[c].values)) ``` #### Prepraring data for prediction ``` #Take targate variable into y y = train['SalePrice'] #Delete the saleprice del train['SalePrice'] #Take their values in X and y X = train.values y = y.values # Split data into train and test formate from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=7) ``` ### Linear Regression ``` #Train the model from sklearn import linear_model model = linear_model.LinearRegression() #Fit the model model.fit(X_train, y_train) #Prediction print("Predict value " + str(model.predict([X_test[142]]))) print("Real value " + str(y_test[142])) #Score/Accuracy print("Accuracy --> ", model.score(X_test, y_test)*100) ``` ### RandomForestRegression ``` #Train the model from sklearn.ensemble import RandomForestRegressor model = 
RandomForestRegressor(n_estimators=1000) #Fit model.fit(X_train, y_train) #Score/Accuracy print("Accuracy --> ", model.score(X_test, y_test)*100) ``` ### GradientBoostingRegressor ``` #Train the model from sklearn.ensemble import GradientBoostingRegressor GBR = GradientBoostingRegressor(n_estimators=100, max_depth=4) #Fit GBR.fit(X_train, y_train) print("Accuracy --> ", GBR.score(X_test, y_test)*100) ```
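Since the target was transformed with `np.log1p` earlier, the scores printed above are R² values on the log scale (sklearn's `score` for regressors is R², not classification accuracy). A small optional add-on, reusing only objects defined above, maps the gradient-boosting predictions back to the original price scale and reports an RMSE there:

```
import numpy as np
from sklearn.metrics import mean_squared_error

pred_log = GBR.predict(X_test)     # predictions on the log1p(SalePrice) scale
pred_price = np.expm1(pred_log)    # invert the earlier np.log1p transform
true_price = np.expm1(y_test)

rmse = np.sqrt(mean_squared_error(true_price, pred_price))
print("RMSE on the original price scale:", rmse)
```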
```
from unityagents import UnityEnvironment
import numpy as np
import random
from collections import deque
import matplotlib.pyplot as plt

from ddpg_agent import Agent
from model import Actor, Critic
import torch

%matplotlib inline

env = UnityEnvironment(file_name='../../Reacher_v1.app')

# get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]

# reset the environment
env_info = env.reset(train_mode=False)[brain_name]

# number of agents
num_agents = len(env_info.agents)
print('Number of agents:', num_agents)

# size of each action
action_size = brain.vector_action_space_size
print('Size of each action:', action_size)

# examine the state space
states = env_info.vector_observations
state_size = states.shape[1]
print('There are {} agents. Each observes a state with length: {}'.format(states.shape[0], state_size))
print('The state for the first agent looks like:', states[0])

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
random_seed = 0

#Agent.actor_local = Actor(state_size, action_size, random_seed).to(device)
#Agent.actor_local.load_state_dict(torch.load('checkpoint_actor_v1.pth'))
agent = Agent(state_size=state_size, action_size=action_size, random_seed=10)
# Agent.actor_local = Actor(state_size, action_size, random_seed).to(device)
agent.actor_local.load_state_dict(torch.load('checkpoint_actor_v1.pth'))

env_info = env.reset(train_mode=True)[brain_name]   # reset the environment
states = env_info.vector_observations               # get the current state (for each agent)
scores = np.zeros(num_agents)                       # initialize the score (for each agent)
agent.reset()
while True:
    #actions = np.random.randn(num_agents, action_size)  # select a random action (for each agent)
    #actions = np.clip(actions, -1, 1)                   # all actions between -1 and 1
    actions = agent.act(states)                     # select actions from the trained policy
    env_info = env.step(actions)[brain_name]        # send all actions to the environment
    next_states = env_info.vector_observations      # get next state (for each agent)
    rewards = env_info.rewards                      # get reward (for each agent)
    dones = env_info.local_done                     # see if episode finished
    scores += env_info.rewards                      # update the score (for each agent)
    states = next_states                            # roll over states to next time step
    if np.any(dones):                               # exit loop if episode finished
        break
print('Total score (averaged over agents) this episode: {}'.format(np.mean(scores)))

env.reset()
```
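matplotlib is imported in this notebook but never used. As an optional follow-up, the per-agent episode scores collected in `scores` above can be visualised directly:

```
# Optional: visualise the per-agent scores from the episode above.
plt.bar(range(num_agents), scores)
plt.xlabel('agent')
plt.ylabel('episode score')
plt.title('Score per agent for the watched episode')
plt.show()
```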
# "Tutorial 01: Getting Started With Statistics" > "An intuitive introduction to statistics" - toc: true - badges: true - comments: true - categories: [basic-stats] - sticky_rank: 1 ![stats](https://user-images.githubusercontent.com/33928040/102720029-b8488000-4317-11eb-9b54-48130147ed3e.jpg) # Overview * Statistics is often hailed as one of the most useful areas of mathematics. * It helps us to make educated guesses of the unknown, and find useful information in an ocean of data. But despite its usefulness, many people struggle with statistics: What is it? How does it work? Is it useful? * For many statistics feels like an unending collection of rules and formulae. If its misapplied, statistics can lead to false conclusions, causing people to develop mistrust in the subject. * In this tutorial we will get to know *What statistic is?* *Why do we need it?*, and *How can it be useful?* # What is Statistics? ## Defining The Term > Statistics is the science of *collecting*, *organising*, *analysing*, *interpreting* and *presenting* data. * Let's now break down the definition to understand its meaning. * The most important thing is: it's all about **data**. Without data, statistics can not exist. * So the first step is to **collect** data. There are a lot of ways to collect data like conducting a survey or performing experiments and recording the obtained data. * Once we have the data, the next step is to **organise** it, i.e. putting the data in some structured order like in a table. * Now we have our data in an organised format, we then **analyse** it. Here we perform techniques like mean, median, mode or other advance techniques to get a sense of what the data is all about. * Once we have the data analysed, we then **interpret** the data, i.e. what does the data tell us, and one very intuitive way to interpret the data is by visualizing it, where we **present** the data using some graphs or charts. * So, in layman's term *statistics* is the science of learning from data. ## Statistical & Non-statistical Questions * In layman's term, the questions where we need statistics to conclude something are known as *statistical questions* and the questions where we don't need statistics to conclude something are known as *non-statistical questions* * Lets first look at some examples, and then we will form a general definition. * Example 01: How old are you? * Here, we are talking about how old is a particular person. We don't need any tools of statistics here to answer this question. We can just ask the age. * So this is a *non-statistical question*. * Example 02: How old are the people who have watched a particular YouTube video in 2020? * Here, we are assuming that multiple people will have watched the video in 2020, and they are not going to be of the same age. There is going to be variability in their age. One can be of 10 years old others might be 20 and so on. * What answer do we give here? Here we want to get a sense of in general, how old are the people? So, this is where the statistics might be valuable. We want to find here, let say an average age for this. * So this is a *statistical question*. * Example 03: Do wolves weigh more than dogs? * So once again here is variability in the weights of dogs and wolves. Some dogs are light, and some are heavy, same goes for wolves as well. * Since we have variability in each of the categories, we might want to use statistics to answer this given question like finding an average and then comparing average to get an answer. 
* So, this is a *statistical question*. * Example 04: What was the difference in rainfall between Singapore and Seattle in 2020? * Now, these two numbers are known and can be measured. The rainfall in Singapore can be measured, as well as in Seattle, and then we can find the difference. * So, we don't need statistics here hence it's a *non-statistical question*. * Here is a pattern, in *non-statistical questions* we are asking about a particular individual. There is only one answer, due to which there is *no variability* in the answer. * In *statistical questions* we are asking about a bunch of individuals, and there is a *variability* in the answer. So, we need statistics here to come up with some features of the data set to be able to make some conclusions. > The **statistical question** is one that can be answered by collecting data and where there will be variability in that data. # Where Do We Use Statistics? * Let's start by seeing where statistics are involved in our everyday life, and then we will see how statistics plays a significant role in many different fields. * One familiar example we have here is how we manage our yearly or monthly budget. We go through how much money did we spent last year, what was the average money we spent, in which area we were spending the most, how much money did we saved? So all these questions come to our mind. It is only because of statistics and data we test out different things to save a little bit more money. * Other examples in our daily lives can be: * We can use statistics to see how are we performing in our exams. * It can help us see are we living a healthy life or not (by looking at what are we eating, how much are we exercising, what is our junk-food intake) * We use statistics when we look at the weather forecast and decide what to wear. * Other areas in which uses statistics heavily are: * Netflix uses it to predict what show we might want to watch next. * Amazon uses it to recommend different products. * Government use it to decide whether or not to invest more in child education or on any other sector. * Statistics plays a very significant role in all the medical studies (from drug creation to cancer treatment). * Stock market uses a statistical technique for stock analysis. * So, in some way or other statistics play a very vital role in our lives, from solving our family budget problem to diagnosis and treatment of deadly diseases to helping businesses in increasing their profits. It helps us to understand the world a little bit better through numbers and other quantitive information. > Important: Statistics is the building block for Data Science. It is a necessary skill that any data scientist or analyst should have. Data scientists and analysts use statistics to find meaningful trends in the data, and it is only possible if one knows statistics and how to use it. # How Is Statistics Useful? * We are living in a world full of data, and its amount is progressively increasing day by day. Some form of data is present in almost every field and to understand the meaning behind the data, you need statistics. * Statistical knowledge helps you use the proper methods to collect the data, employ correct analysis, and effectively present the results. * It is a crucial process behind how we make discoveries, make decisions based on the data and make predictions. * And allows us to understand a subject(be it medical science or sports) much more deeply. 
* So to conclude no matter what field we are in our work will be involved with data in some form or the other, and it is good to know statistics which will help us to understand the problem a little bit better. # Questionnaire **Ques 01.** Classify the following questions into statistical and non-statistical question. Also, explain the reason. &emsp;&emsp;1.1. Do dogs run faster than cats? &emsp;&emsp;1.2. Does your dog weigh more than that wolf? &emsp;&emsp;1.3. Does it rain more in Seattle than Singapore? &emsp;&emsp;1.4. In general, will I use less gas driving at 40 kmph than 50 kmph? &emsp;&emsp;1.5. Do English professors get paid less than math professors? &emsp;&emsp;1.6. Does the most highly paid English professor at Harvard get paid more than the most highly paid math professor at MIT in 2020? **Ques 02.** Data representing the heights of the students in a mathematics class is specified. Write down a statistical question you can answer using the data provided. **Ques 03.** Can you think of any other way(other than the listed examples in the above notes) you can use statistics in your daily life? **Ques 04.** Your college football(or any sport you want) coach wants his team to perform better in the upcoming tournament and ask for your help. Is there any way you can use statistics to help him out(given that you have data on the team's past performances)? If yes, then how? {{ '[Solutions](https://slothfulwave612.github.io/Better-Stats-Than-Never/solutions/2020/12/23/Exercise-Solutions.html#Tutorial-01:-Getting-Started-With-Statistics)' | fndetail: 1 }} {{ 'Notes are compiled from [ MySecretMathTutor](https://www.youtube.com/watch?v=SFPGVTThJNk), [Khan Academy](https://www.khanacademy.org/math/statistics-probability/designing-studies/statistics-overview/v/statistical-questions#:~:text=A%20statistical%20question%20is%20one,hat%20is%20Sara%20wearing%3F%22.), [Anywhere Math](https://www.youtube.com/watch?v=LMSyiAJm99g&t=607s), [CrashCourse](https://www.youtube.com/watch?v=sxQaBpKfDRk&t=297s) and results from [Google](https://www.google.com)' | fndetail: 2 }} {{ 'If you face any problem or have any feedback/suggestions feel free to comment.' | fndetail: 3 }}
# Example classification analysis using ShinyLearner *By Erica Suh and Stephen Piccolo* This notebook illustrates how to perform a benchmark comparison of classification algorithms using [ShinyLearner](https://github.com/srp33/ShinyLearner). We assume the reader has a moderate level of understanding of shell and Python scripting. We also assume that the user's operating system is UNIX-based. ### Install Python modules ``` %%bash # This step may or may not be necessary on your system: pip3 install --upgrade pip # You only need to install these modules once pip3 install pmlb pandas numpy ``` ### Preparing the data First, let's generate a "null" dataset that contains no signal to ensure that ShinyLearner doesn't find a signal when there is nothing to be found. ``` import numpy as np import os import pandas import shutil def one_hot_encode(file_path, column_names): data = pandas.read_csv(file_path, index_col=0, sep="\t") if column_names == None: column_names = [x for x in list(data) if not x in ["Class"]] data = pandas.get_dummies(data, drop_first=True, columns=column_names) data.to_csv(file_path, sep="\t", index=True) directory = "Datasets" if os.path.exists(directory): shutil.rmtree(directory) os.makedirs(directory) np.random.seed(0) num_observations = 500 num_numeric_features = 20 num_discrete_features = 10 data_dict = {} data_dict[""] = ["Instance{}".format(i+1) for i in range(num_observations)] data_dict["Class"] = np.random.choice([0, 1], size=num_observations, p=[0.5, 0.5]) for i in range(num_numeric_features): data_dict["Numeric{}".format(i+1)] = np.random.normal(0, 1, num_observations) for i in range(num_discrete_features): data_dict["Discrete{}".format(i+1)] = np.random.choice(["A", "B", "C"], size=num_observations, p=[0.4, 0.5, 0.1]) df = pandas.DataFrame(data=data_dict) df.set_index("", inplace=True) file_path = '{}/{}.tsv'.format(directory, "null") df.to_csv(file_path, sep="\t", index=True) one_hot_encode('{}/{}.tsv'.format(directory, "null"), [x for x in data_dict.keys() if x.startswith("Discrete")]) ``` The Penn Machine Learning Benchmarks (PMLB) repository contains a large number of datasets that can be used to test machine-learning algorithms. We can access this repository using the Python module named `pmlb`. For demonstration purposes, we will fetch 10 biology-related datasets from PMLB. First, define a list that indicates the unique identifier for each of these datasets. ``` datasets = ['analcatdata_aids', 'ann-thyroid', 'breast-cancer', 'dermatology', 'diabetes', 'hepatitis', 'iris', 'liver-disorder', 'molecular-biology_promoters', 'yeast'] ``` ShinyLearner requires that input data files have exactly one feature named 'Class', which includes the class labels. So we must modify the PMLB data to meet this requirement. After modifying the data, we save each DataFrame to a a file with a [supported extension](https://github.com/srp33/ShinyLearner/blob/master/InputFormats.md). (See PMLB's [GitHub repository](https://github.com/EpistasisLab/penn-ml-benchmarks) for more information about how to use this module.) 
``` from pmlb import fetch_data for data in datasets: curr_data = fetch_data(data) curr_data = curr_data.rename(columns={'target': 'Class'}) # Rename 'target' to 'Class' if data == "molecular-biology_promoters": curr_data = curr_data.drop(columns=["instance"], axis=1) curr_data.to_csv('{}/{}.tsv'.format(directory, data), sep='\t', index=True) # Save to a .tsv file one_hot_encode('{}/{}.tsv'.format(directory, "analcatdata_aids"), ["Race"]) one_hot_encode('{}/{}.tsv'.format(directory, "breast-cancer"), ["menopause", "breast-quad"]) one_hot_encode('{}/{}.tsv'.format(directory, "molecular-biology_promoters"), None) ``` ### Performing a benchmark comparison of 10 classification algorithms For this initial analysis, we will apply 10 different classification algorithms to each dataset. Initially, we will use Monte Carlo cross validation (with *no* hyperparameter optimization). To keep the execution time reasonable, we will do 5 iterations of Monte Carlo cross validation. ShinyLearner is executed within a Docker container. The ShinyLearner [web application](http://bioapps.byu.edu/shinylearner/) enables us to more easily build commands for executing ShinyLearner within Docker at the command line. We used this tool to create a template command. Below we modify that template and execute ShinyLearner for each dataset. We also indicate that we want to one-hot encode (`--ohe`) and scale the data (`--scale`) and that we want to impute any missing values (`--impute`). (*This process takes awhile to execute. You won't see any output until the analysis has completed. To facilitate this long-running execution, you could [run this notebook at the command line](https://stackoverflow.com/a/40311709). Also, we could use the `shinylearner_gpu` Docker image to speed up the keras algorithm, but that requires `nvidia-docker` to be installed, so we are using the regular, non-GPU image.*) ``` %%bash function runShinyLearner { dataset_file_path="$1" dataset_file_name="$(basename $dataset_file_path)" dataset_name="${dataset_file_name/\.tsv/}" dataset_results_dir_path="$(pwd)/Results_Basic/$dataset_name" mkdir -p "$dataset_results_dir_path" docker run --rm \ -v "$(pwd)/Datasets":/InputData \ -v "$dataset_results_dir_path":/OutputData \ --user $(id -u):$(id -g) \ srp33/shinylearner:version513 \ /UserScripts/classification_montecarlo \ --data "$dataset_file_name" \ --description "$dataset_name" \ --iterations 5 \ --classif-algo "/AlgorithmScripts/Classification/tsv/keras/dnn/default*" \ --classif-algo "/AlgorithmScripts/Classification/tsv/mlr/xgboost/default*" \ --classif-algo "/AlgorithmScripts/Classification/tsv/mlr/h2o.randomForest/default*" \ --classif-algo "/AlgorithmScripts/Classification/tsv/mlr/mlp/default*" \ --classif-algo "/AlgorithmScripts/Classification/tsv/sklearn/decision_tree/default*" \ --classif-algo "/AlgorithmScripts/Classification/tsv/sklearn/logistic_regression/default*" \ --classif-algo "/AlgorithmScripts/Classification/tsv/sklearn/svm/default*" \ --classif-algo "/AlgorithmScripts/Classification/arff/weka/HoeffdingTree/default*" \ --classif-algo "/AlgorithmScripts/Classification/arff/weka/MultilayerPerceptron/default*" \ --classif-algo "/AlgorithmScripts/Classification/arff/weka/SimpleLogistic/default*" \ --output-dir "/OutputData" \ --ohe false \ --scale robust \ --impute true \ --verbose false } rm -rf Results_Basic for dataset_file_path in ./Datasets/*.tsv do runShinyLearner "$dataset_file_path" done ``` ### Repeating the benchmark comparison with hyperparameter optimization ShinyLearner provides 
an option to optimize a classification algorithm's hyperparameters. To accomplish this, it uses nested cross validation. This process requires more computational time, but it often increases classification accuracy. In the code below, we execute the same 10 classification algorithms on the same 10 datasets. There are some differences in the code below compared to the code above: 1. We store the output in `Results_ParamsOptimized` rather than `Results_Basic`. 2. We use the `nestedclassification_montecarlo` user script rather than `classification_montecarlo`. 3. The path specified for each classification algorithm ends with `*` rather than `default*`. This tells ShinyLearner to evaluate all hyperparameter combinations, not just default ones. 4. We indicate that we want to use 5 "outer" iterations and 3 "inner" iterations (to optimize hyperparameters). ``` %%bash function runShinyLearner { dataset_file_path="$1" dataset_file_name="$(basename $dataset_file_path)" dataset_name="${dataset_file_name/\.tsv/}" dataset_results_dir_path="$(pwd)/Results_ParamsOptimized/$dataset_name" mkdir -p $dataset_results_dir_path docker run --rm \ -v "$(pwd)/Datasets":/InputData \ -v "$dataset_results_dir_path":/OutputData \ --user $(id -u):$(id -g) \ srp33/shinylearner:version513 \ /UserScripts/nestedclassification_montecarlo \ --data "$dataset_file_name" \ --description "$dataset_name" \ --outer-iterations 5 \ --inner-iterations 3 \ --classif-algo "/AlgorithmScripts/Classification/tsv/keras/dnn/*" \ --classif-algo "/AlgorithmScripts/Classification/tsv/mlr/xgboost/*" \ --classif-algo "/AlgorithmScripts/Classification/tsv/mlr/h2o.randomForest/*" \ --classif-algo "/AlgorithmScripts/Classification/tsv/mlr/mlp/*" \ --classif-algo "/AlgorithmScripts/Classification/tsv/sklearn/decision_tree/*" \ --classif-algo "/AlgorithmScripts/Classification/tsv/sklearn/logistic_regression/*" \ --classif-algo "/AlgorithmScripts/Classification/tsv/sklearn/svm/*" \ --classif-algo "/AlgorithmScripts/Classification/arff/weka/HoeffdingTree/*" \ --classif-algo "/AlgorithmScripts/Classification/arff/weka/MultilayerPerceptron/*" \ --classif-algo "/AlgorithmScripts/Classification/arff/weka/SimpleLogistic/*" \ --ohe false \ --scale robust \ --impute true \ --verbose false } rm -rf Results_ParamsOptimized for dataset_file_path in ./Datasets/*.tsv do runShinyLearner "$dataset_file_path" done ``` ### Repeating the benchmark comparison with feature selection (along with classification) In this example, we will try 5 feature-selection algorithms in combination with the same 10 classification algorithms that we used previously. Although we could optimize hyperparameters as well, we won't do that, to reduce computational complexity. We have changed the following from the previous example: * We store the results in the `Results_FeatureSelection` directory. * We use the `nestedboth_montecarlo` user script. * We use default hyperparameters. * We added `--fs-algo` and `--num-features` arguments. 
``` %%bash function runShinyLearner { dataset_file_path="$1" dataset_file_name="$(basename $dataset_file_path)" dataset_name="${dataset_file_name/\.tsv/}" dataset_results_dir_path="$(pwd)/Results_FeatureSelection/$dataset_name" mkdir -p $dataset_results_dir_path docker run --rm \ -v "$(pwd)/Datasets":/InputData \ -v "$dataset_results_dir_path":/OutputData \ --user $(id -u):$(id -g) \ srp33/shinylearner:version513 \ /UserScripts/nestedboth_montecarlo \ --data "$dataset_file_name" \ --description "$dataset_name" \ --outer-iterations 5 \ --inner-iterations 3 \ --classif-algo "/AlgorithmScripts/Classification/tsv/keras/dnn/default*" \ --classif-algo "/AlgorithmScripts/Classification/tsv/mlr/xgboost/default*" \ --classif-algo "/AlgorithmScripts/Classification/tsv/mlr/h2o.randomForest/default*" \ --classif-algo "/AlgorithmScripts/Classification/tsv/mlr/mlp/default*" \ --classif-algo "/AlgorithmScripts/Classification/tsv/sklearn/decision_tree/default*" \ --classif-algo "/AlgorithmScripts/Classification/tsv/sklearn/logistic_regression/default*" \ --classif-algo "/AlgorithmScripts/Classification/tsv/sklearn/svm/default*" \ --classif-algo "/AlgorithmScripts/Classification/arff/weka/HoeffdingTree/default*" \ --classif-algo "/AlgorithmScripts/Classification/arff/weka/MultilayerPerceptron/default*" \ --classif-algo "/AlgorithmScripts/Classification/arff/weka/SimpleLogistic/default*" \ --fs-algo "/AlgorithmScripts/FeatureSelection/tsv/mlr/kruskal.test/default*" \ --fs-algo "/AlgorithmScripts/FeatureSelection/tsv/mlr/randomForestSRC.rfsrc/default*" \ --fs-algo "/AlgorithmScripts/FeatureSelection/tsv/sklearn/mutual_info/default*" \ --fs-algo "/AlgorithmScripts/FeatureSelection/tsv/sklearn/random_forest_rfe/default*" \ --fs-algo "/AlgorithmScripts/FeatureSelection/tsv/sklearn/svm_rfe/default*" \ --fs-algo "/AlgorithmScripts/FeatureSelection/arff/weka/Correlation/default*" \ --fs-algo "/AlgorithmScripts/FeatureSelection/arff/weka/GainRatio/default*" \ --fs-algo "/AlgorithmScripts/FeatureSelection/arff/weka/OneR/default*" \ --fs-algo "/AlgorithmScripts/FeatureSelection/arff/weka/ReliefF/default*" \ --fs-algo "/AlgorithmScripts/FeatureSelection/arff/weka/SymmetricalUncertainty/default*" \ --num-features "1,3,5,10,15,20,50,200" \ --ohe false \ --scale robust \ --impute true \ --verbose false } rm -rf Results_FeatureSelection for dataset_file_path in ./Datasets/*.tsv do runShinyLearner "$dataset_file_path" done ``` ### Compress output files and clean up ``` %%bash # These files are relatively large and we won't use them to make graphs, so let's delete them. rm -fv Results_ParamsOptimized/*/Nested_ElapsedTime.tsv rm -fv Results_ParamsOptimized/*/Nested_Best.tsv mv Results_ParamsOptimized/diabetes/Nested_Predictions.tsv Results_ParamsOptimized/diabetes/Nested_Predictions.tsv.tmp rm -fv Results_ParamsOptimized/*/Nested_Predictions.tsv mv Results_ParamsOptimized/diabetes/Nested_Predictions.tsv.tmp Results_ParamsOptimized/diabetes/Nested_Predictions.tsv rm -fv Results_FeatureSelection/*/Nested_Predictions.tsv rm -fv Results_FeatureSelection/*/Nested_*ElapsedTime.tsv rm -fv Results_FeatureSelection/*/Nested_Best.tsv rm -rfv Datasets ``` ### Analyzing and visualizing the results Please see the document called `Analyze_Results.Rmd`, which contains R code for analyzing and visualizing the results.
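The analysis itself lives in `Analyze_Results.Rmd` (R), but for a quick sanity check from Python you could load the per-dataset metric files with pandas, roughly as sketched below. The file name `Metrics.tsv` and the column names mentioned in the comments are assumptions about ShinyLearner's output layout rather than something taken from this notebook, so adjust them to whatever your `Results_Basic` folders actually contain.

```
import glob
import pandas as pd

# Collect the per-dataset metric files produced by the basic runs.
# NOTE: "Metrics.tsv" and the column names referenced below are assumptions;
# check the Results_Basic/<dataset>/ folders for the exact file names.
frames = []
for path in glob.glob("Results_Basic/*/Metrics.tsv"):
    df = pd.read_csv(path, sep="\t")
    df["Dataset"] = path.split("/")[-2]  # infer the dataset name from the folder
    frames.append(df)

if frames:
    results = pd.concat(frames, ignore_index=True)
    print(results.head())
    # If the file is in long format with 'Algorithm', 'Metric' and 'Value'
    # columns, a quick summary per algorithm would look like:
    # print(results[results["Metric"] == "AUROC"]
    #       .groupby("Algorithm")["Value"].mean().sort_values())
else:
    print("No metric files found - adjust the glob pattern above.")
```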
### Canned Estimators

In this notebook we'll demonstrate how to use two Canned Estimators (these encapsulate the lower-level TensorFlow code we've seen so far, and use an API loosely inspired by [scikit-learn](https://scikit-learn.org)). There are several advantages to Canned Estimators.

* If you're using Estimators, you won't have to manage Sessions, or write your own logic for TensorBoard, or for saving and loading checkpoints.
* You'll get out-of-the-box distributed training (of course, you will have to take care to read your data efficiently, and set up a cluster).

Here, we'll read data using [input functions](https://www.tensorflow.org/api_docs/python/tf/estimator/inputs/numpy_input_fn), which are appropriate for in-memory data.

* These provide batching and other features for you, so you don't have to write that code yourself.
* In the structured data notebook, we'll use the new [Dataset API](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/docs_src/programmers_guide/datasets.md) - which is a popular abstraction, and a great way to read and pre-process large datasets efficiently.

Although the Estimators we'll use here are relatively simple (a LinearClassifier, and a Fully Connected Deep Neural Network), we also provide more interesting ones (including [TensorFlow Wide and Deep](https://www.tensorflow.org/tutorials/wide_and_deep)). I'm also excited that additional Estimators are on their way - stay tuned in the upcoming months. Also note that Estimators log quite a lot of output.

```
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

# We'll use Keras (included with TensorFlow) to import the data
(x_train, y_train), (x_test, y_test) = tf.contrib.keras.datasets.mnist.load_data()

x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
y_train = y_train.astype('int32')
y_test = y_test.astype('int32')

# Normalize the color values to 0-1
# (as imported, they're 0-255)
x_train /= 255
x_test /= 255

print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
```

Here's our input function.

* By setting ```num_epochs``` to ```None```, we'll loop over the data indefinitely so we can train for as long as we like.
* The default ```batch_size``` is ```128```, but you can provide a different parameter if you like.

You can read more about the numpy input function [here](https://www.tensorflow.org/api_docs/python/tf/estimator/inputs/numpy_input_fn). We also provide a nice one for [Pandas](https://www.tensorflow.org/api_docs/python/tf/estimator/inputs/pandas_input_fn), more on that later.

```
train_input = tf.estimator.inputs.numpy_input_fn(
    {'x': x_train},
    y_train,
    num_epochs=None,  # repeat forever
    shuffle=True)     # shuffle the training data

test_input = tf.estimator.inputs.numpy_input_fn(
    {'x': x_test},
    y_test,
    num_epochs=1,   # loop through the dataset once
    shuffle=False)  # don't shuffle the test data

# define the features for our model
# the names must match the input function
feature_spec = [tf.feature_column.numeric_column('x', shape=784)]
```

Here, we'll create a ```LinearClassifier``` - this is identical to our Softmax (aka, multiclass logistic regression model) from the second notebook.

```
estimator = tf.estimator.LinearClassifier(feature_spec,
                                          n_classes=10,
                                          model_dir="./graphs/canned/linear")

# I've arbitrarily decided to train for 1000 steps
estimator.train(train_input, steps=1000)

# We should see about 90% accuracy here.
evaluation = estimator.evaluate(input_fn=test_input)
print(evaluation)
```

Here's how you would print individual predictions.

```
MAX_TO_PRINT = 5

# This returns a generator object
predictions = estimator.predict(input_fn=test_input)
i = 0
for p in predictions:
    true_label = y_test[i]
    predicted_label = p['class_ids'][0]
    print("Example %d. True: %d, Predicted: %d" % (i, true_label, predicted_label))
    i += 1
    if i == MAX_TO_PRINT:
        break
```

Here's how easy it is to switch the model to a fully connected DNN.

```
estimator = tf.estimator.DNNClassifier(
    hidden_units=[256],  # we will arbitrarily use one hidden layer of 256 units
    feature_columns=feature_spec,
    n_classes=10,
    model_dir="./graphs/canned/deep")

# I've arbitrarily decided to train for 2000 steps
estimator.train(train_input, steps=2000)

# Expect accuracy around 97%
evaluation = estimator.evaluate(input_fn=test_input)
print(evaluation)
```

If you like, you can compare these runs with TensorBoard.

```
$ tensorboard --logdir=graphs/canned/
```
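The notes above mention a Pandas-friendly input function without showing it. Here is a minimal sketch of `tf.estimator.inputs.pandas_input_fn` on a toy DataFrame; the toy columns and values are made up purely for illustration (the MNIST pixel arrays above are better served by the numpy input function).

```
import pandas as pd

# A tiny toy DataFrame, purely to illustrate the Pandas input function API.
toy_df = pd.DataFrame({
    'age':    [23.0, 45.0, 31.0, 52.0],
    'income': [48.0, 81.0, 62.0, 90.0],
})
toy_labels = pd.Series([0, 1, 0, 1], dtype='int32')

toy_input = tf.estimator.inputs.pandas_input_fn(
    x=toy_df,
    y=toy_labels,
    batch_size=2,
    num_epochs=None,  # repeat forever, as with the numpy input function above
    shuffle=True)

# Each DataFrame column becomes a feature column, matched by name.
toy_features = [tf.feature_column.numeric_column('age'),
                tf.feature_column.numeric_column('income')]
```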
``` import os os.chdir('../../') import sys sys.path.insert(0, './python') sys.path.append('/local-scratch/xca64/tmp/caffe-master/python/myFunc') import caffe import numpy as np from pylab import * %matplotlib inline niter = 200 # losses will also be stored in the log train_loss = np.zeros(niter) scratch_train_loss = np.zeros(niter) caffe.set_device(0) caffe.set_mode_gpu() solver = caffe.SGDSolver('models/color_constancy/solver.prototxt') solver.net.copy_from('models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel') import tempfile def run_solver(solver, niter, disp_interval): blobs = ('loss', 'acc') loss, acc = (np.zeros(niter), np.zeros(niter)) for it in range(niter): solver.step(1) # run a single SGD step in Caffe loss[it] = (solver.net.blobs['loss'].data.copy()) acc[it] = 0#(solver.net.blobs['loss_ang'].data.copy()) if it % disp_interval == 0 or it + 1 == niter: loss_disp = 'loss: %.3f'%loss[it] print '%3d) %s Angular Erro %.3f' % (it, loss_disp, acc[it]) #print(solver.net.blobs['fc8_flickr'].data[1], solver.net.blobs['illu'].data[1]) # Save the learned weights from both nets. weight_dir = tempfile.mkdtemp() name = 'firstTry' weights = {} filename = 'weights.%s.caffemodel' % name weights[name] = os.path.join(weight_dir, filename) solver.net.save(weights[name]) return loss, acc, weights loss_1, acc_1, weights_1 = run_solver(solver, 1000,10) print [solver.net.blobs['fc8_flickr'].data, solver.net.blobs['illu'].data] solver = caffe.SGDSolver('models/color_constancy/solver.prototxt') solver.net.copy_from('models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel') solver.step(1) # SGD by Caffe import matplotlib.pyplot as plt import matplotlib.image as mpimg def deprocess_net_image(image, gamma): image = image.copy() # don't modify destructively image = image[::-1] # BGR -> RGB image = image.transpose(1, 2, 0) # CHW -> HWC image += [123, 117, 104] # (approximately) undo mean subtraction # clamp values in [0, 255] image[image < 0], image[image > 255] = 0, 255 image = image.astype(np.float32) image = image/255 image = image**gamma #Gamma correction image = image*255 # round and cast from float32 to uint8 image = np.round(image) image = np.require(image, dtype=np.uint8) return image def my_deprocess_net_image(image, gamma): image = image.copy() # don't modify destructively image = image[::-1] # BGR -> RGB image = image.transpose(1, 2, 0) # CHW -> HWC #image += [123, 117, 104] # (approximately) undo mean subtraction # clamp values in [0, 255] image[image < 0], image[image > 255] = 0, 255 image = image.astype(np.float32) image = image/255 image = image**gamma #Gamma correction image = image*255 # round and cast from float32 to uint8 image = np.round(image) image = np.require(image, dtype=np.uint8) return image for i in range(5): img = my_deprocess_net_image(solver.net.blobs['data'].data[i+20], 0.5) plt.imshow(img) plt.figure(i+1) label = solver.net.blobs['illu'].data[20] print label print np.max(solver.net.blobs['data'].data[20]) # We run the solver for niter times, and record the training loss. for it in range(niter): solver.step(1) # SGD by Caffe # store the train loss train_loss[it] = solver.net.blobs['loss'].data if it % 10 == 0: print 'iter %d, finetune_loss=%f, scratch_loss=' % (it, train_loss[it]) print 'done' ```
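The loop above stores the loss values in `train_loss`, but the notebook stops before plotting them. A small addition like the following (not part of the original notebook; it reuses the `plt`, `np`, `niter` and `train_loss` names already defined above) makes it easier to see whether the loss is actually decreasing.

```
# Plot the recorded fine-tuning loss over iterations.
plt.figure(figsize=(8, 4))
plt.plot(np.arange(niter), train_loss, label='fine-tuning loss')
plt.xlabel('iteration')
plt.ylabel('loss')
plt.legend()
plt.show()
```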
# End-to-End AutoML for Insurance Cross-Sell ## Part 3 - H2O AutoML with MLflow ### Contents [Part 1 - Initial Setup](#setup) [Part 2 - H2O AutoML Training with MLflow Tracking](#automl) [Part 3 - Predict with H2O AutoML Best Model](#predict) [Part 4 - H2O Model Explainability](#explain) [Part 5 - References](#references) ___ <a name="setup"></a> ## (1) Initial Setup ### Install pre-requisite dependencies ``` # !pip install requests # !pip install tabulate # !pip install future ``` ### Install H2O in Python ``` # !pip install -f http://h2o-release.s3.amazonaws.com/h2o/latest_stable_Py.html h2o ``` ### Install MLflow ``` # !pip install mlflow ``` ### Import dependencies and datasets ``` # Import libraries import h2o from h2o.automl import H2OAutoML, get_leaderboard import mlflow import mlflow.h2o from mlflow.tracking import MlflowClient from mlflow.entities import ViewType import pandas as pd import json from sklearn.metrics import f1_score, accuracy_score pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', None) import warnings warnings.filterwarnings("ignore") ``` ### Initiate H2O cluster ``` # Start the H2O cluster (locally) h2o.init() ``` ### Setup MLflow - First open Powershell terminal and change path to the directory hosting this notebook - Enter `mlflow ui` to initiate MLFlow server - Once done, access the MLFlow UI served on http://127.0.0.1:5000 ``` # Initialize MLFlow client client = MlflowClient() # Set up MlFlow experiment experiment_name = 'automl-insurance' try: experiment_id = mlflow.create_experiment(experiment_name) experiment = client.get_experiment_by_name(experiment_name) except: experiment = client.get_experiment_by_name(experiment_name) mlflow.set_experiment(experiment_name) # Print experiment details print(f"Name: {experiment_name}") print(f"Experiment_id: {experiment.experiment_id}") print(f"Artifact Location: {experiment.artifact_location}") print(f"Tags: {experiment.tags}") print(f"Lifecycle_stage: {experiment.lifecycle_stage}") print(f"Tracking uri: {mlflow.get_tracking_uri()}") ``` ___ <a name="automl"></a> ## (2) H2O AutoML Training with MLFlow Tracking ### Import training data - Not splitting further into train/val set because 5-fold cross-val is applied by default in the AutoML training ``` # Import data directly as H2O frame main_frame = h2o.import_file(path='data/processed/train.csv') # Save data types of columns in H2O frame (for matching with test set during prediction) with open('data/processed/train_col_types.json', 'w') as fp: json.dump(main_frame.types, fp) # Alternatively, can first import as pandas csv, then convert to H2O frame # main_df = pd.read_csv('data/processed/train.csv') # main_frame = h2o.H2OFrame(main_df) # Set predictor and target columns target = 'Response' predictors = [n for n in main_frame.col_names if n != target] # Factorize target variable so that autoML tackles classification problem (instead of regression) main_frame[target] = main_frame[target].asfactor() # Visualize H2O frame structure main_frame.head() ``` ### Start H2O AutoML training with MLflow tracking ``` # Wrap autoML training with MLflow with mlflow.start_run(): aml = H2OAutoML( max_models=13, # Run AutoML for n base models seed=42, balance_classes=True, # Our target classes are imbalanced, so we set this to True sort_metric='logloss', # Sort models by logloss (main metric for multi-classification) verbosity='info', # Turn on verbose info exclude_algos = ['GLM', 'DRF'], # Specify which algorithms to exclude ) aml.train(x=predictors, y=target, 
training_frame=main_frame) # Set metrics to log mlflow.log_metric("log_loss", aml.leader.logloss()) mlflow.log_metric("AUC", aml.leader.auc()) # Log best model (mlflow.h2o module provides API for logging & loading H2O models) mlflow.h2o.log_model(aml.leader, artifact_path="model" ) model_uri = mlflow.get_artifact_uri("model") print(model_uri) # Print and view AutoML Leaderboard lb = get_leaderboard(aml, extra_columns='ALL') print(lb.head(rows=lb.nrows)) # Get IDs of current experiment run exp_id = experiment.experiment_id run_id = mlflow.active_run().info.run_id # Save leaderboard as CSV lb_path = f'mlruns/{exp_id}/{run_id}/artifacts/model/leaderboard.csv' lb.as_data_frame().to_csv(lb_path, index=False) print(f'Leaderboard saved in {lb_path}') ``` ### View AutoML logs ``` # Get AutoML event log log = aml.event_log log ``` ### View best model ``` # Leader (best) model stored here aml.leader ``` #### Learning Curve Plot ``` # Display learning curve learning_curve_plot = aml.leader.learning_curve_plot() ``` ___ <a name="predict"></a> ## (3) Predict with H2O AutoML Best Model ### Prepare test data ``` # Import test data test_frame = h2o.import_file(path='data/processed/test.csv') # Drop ID column for test set X_test_frame = test_frame.drop('Response') y_test_frame = test_frame[:, 'Response'] ``` ### Load leader model from MLflow saved in artifacts ``` # Get dataframe of all runs all_experiments = [exp.experiment_id for exp in client.list_experiments()] runs = mlflow.search_runs(experiment_ids=all_experiments, run_view_type=ViewType.ALL) # Identify best model (experiment id and run id) amongst all runs in the experiment run_id, exp_id = runs.loc[runs['metrics.log_loss'].idxmin()]['run_id'], runs.loc[runs['metrics.log_loss'].idxmin()]['experiment_id'] run_id, exp_id # Load best model (AutoML leader) best_model = mlflow.h2o.load_model(f"mlruns/{exp_id}/{run_id}/artifacts/model/") # Generate predictions with best model (output is H2O frame) preds_frame = best_model.predict(X_test_frame) # Get y values (ground truth and predicted) y_pred = preds_frame.as_data_frame()['predict'] y_true = y_test_frame.as_data_frame()['Response'] ``` ### Get Performance Metrics ``` from sklearn.metrics import f1_score, accuracy_score f1_score(y_true, y_pred) accuracy_score(y_true, y_pred) ``` ___ <a name="explain"></a> ## (4) H2O Model Explainability - More info: https://docs.h2o.ai/h2o/latest-stable/h2o-docs/explain.html#output-explanations
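The explainability section above only links to the documentation; a minimal sketch of how it is usually invoked is shown below. This assumes the `aml` object from the training run (or the `best_model` loaded from MLflow) is still in memory and that you are on a reasonably recent H2O release that ships the explain interface.

```
# Generate the explainability report for the AutoML run on the test frame
# (leaderboard-level plots such as variable importance heatmaps, plus
# model-level plots such as SHAP summaries and partial dependence plots).
aml.explain(test_frame)

# Explanations can also be produced for a single model or a single row:
# best_model.explain(test_frame)
# aml.explain_row(test_frame, row_index=0)
```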
# ex1-Export A NetCDF Data As A Time Series of GeoTiff Images The command-line tool of [gdal_translate](https://gdal.org/programs/gdal_translate.html#gdal-translate) provided by GDAL should be the most commonly used option for converting raster data between different formats, while [CDO](https://code.mpimet.mpg.de/projects/cdo/) is another excellent command line suite for manipulating and analysing climate data. This time we will apply them to export each time slice from a NetCDF data as a single GeoTiff image(s). we still use the monthly mean sea level pressure of [mslp.mon.mean.nc](http://www.esrl.noaa.gov/psd/data/gridded/data.ncep.reanalysis2.surface.html) as an example. Before moving on, make sure you have GDAL and CDO installed on your computer or set up a new experimental environment using docker just like me. The following is my ***Dockerfile***. ``` RG BASE_CONTAINER=andrejreznik/python-gdal:stable FROM $BASE_CONTAINER LABEL maintainer="[email protected]" RUN apt-get update && apt-get install -y --no-install-recommends cdo && \ rm -rf /var/lib/apt/lists/* ``` ## Check data information We can use [***gdalinfo***] or [***cdo info***]. Here we use gdalinfo as we mainly use gdal_translate to convert data formats. ``` gdalinfo mslp.mon.mean.nc ``` The above command will output much information on the computer screen, where we can find that each time slice has been seen as a single ***band***. We take the last band as an example. ![img](img/gdalinfo.png) It is worth noting there are two extra parameters of ***scale_factor*** and ***add_offset***. According to [NetCDF Attribute Conventions](http://www.bic.mni.mcgill.ca/users/sean/Docs/netcdf/guide.txn_18.html): - scale_factor > If present for a variable, the data are to be multiplied by this factor after the data are read by the application that accesses the data. - add_offset > If present for a variable, this number is to be added to the data after it is read by the application that accesses the data. If both scale_factor and add_offset attributes are present, the data are first scaled before the offset is added. The attributes scale_factor and add_offset can be used together to provide simple data compression to store low-resolution floating-point data as small integers in a netCDF file. When scaled data are written, the application should first subtract the offset and then divide by the scale factor. When scale_factor and add_offset are used for packing, the associated variable (containing the packed data) is typically of type byte or short, whereas the unpacked values are intended to be of type float or double. The attributes scale_factor and add_offset should both be of the type intended for the unpacked data, e.g. float or double. # Convert NetCDF to GeoTiff image(s) Keep in mind that the band number starts from ***1*** in gdal_translate. The command option of ***-unscale*** can be used to tell gdal_translate to apply the scale/offset metadata for the bands to convert scaled values to unscaled values. Then we can easily convert one band (or a time slice) of the NetCDF data into a geotiff format just use the command: ``` gdal_translate -ot Float64 NETCDF:mslp.mon.mean.nc:mslp -b 1 -unscale "mslp_01.tif" ``` However, we'd like to export all time slices with meaningful names such as ***mslp_yyyy-mm.tif***. Now it is show time for CDO. 
We can get all the date-time information of the data using

```
cdo showdate mslp.mon.mean.nc
```

The output looks like

![dates](img/cdoshowdate.png)

By now you can probably work out how to finish the remaining task yourself: yes, put the pieces together in a bash script.

```
#!/bin/bash

infile=mslp.mon.mean.nc
band=1

for idate in $(cdo showdate $infile)
do
    echo $band
    ym="${idate:0:7}"
    gdal_translate -ot Float64 NETCDF:$infile:mslp -b $band -unscale "mslp_$ym.tif"
    ((band++))
done

echo All done
```

Done! Congratulations! You now have another option for exporting each time slice of a NetCDF file as a single raster.

![list](img/filelist.png)

If you prefer other methods, you can refer to this [python tutorial](https://www.linkedin.com/pulse/convert-netcdf4-file-geotiff-using-python-chonghua-yin/).

## References

- https://code.mpimet.mpg.de/projects/cdo/
- https://gdal.org/programs/gdal_translate.html#gdal-translate
- NCEP-DOE AMIP-II Reanalysis (R-2): M. Kanamitsu, W. Ebisuzaki, J. Woollen, S-K Yang, J.J. Hnilo, M. Fiorino, and G. L. Potter. Bulletin of the American Meteorological Society, 1631-1643, Nov 2002.
```
# File is to define the metrics to evaluate predictions on
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import imageio
import os
import time

def calc_dists(lines1, lines2, dist_type):
    x1s = lines1[:, 2]
    y1s = lines1[:, 3]
    x2s = lines2[:, 2]
    y2s = lines2[:, 3]
    if dist_type == "L2":
        return np.mean(np.sqrt(np.square(y2s - y1s) + np.square(x2s - x1s)))
    return np.mean(np.abs(y2s-y1s) + np.abs(x2s-x1s))

def extract_trajectory(data, ped_id):
    return data[data[:, 1] == ped_id, :]

def overall_mean(gt, pred, ped_id, dist_type="L2"):
    traj1 = extract_trajectory(gt, ped_id)
    traj2 = extract_trajectory(pred, ped_id)
    dist = calc_dists(traj1, traj2, dist_type) / 12
    return dist

def predicted_mean(gt, pred, ped_id, dist_type="L2"):
    traj1 = extract_trajectory(gt, ped_id)[8:, :]  # first 8 are ignored since those are given
    traj2 = extract_trajectory(pred, ped_id)[8:, :]
    dist = calc_dists(traj1, traj2, dist_type) / 12.
    return dist

def final_mean(gt, pred, ped_id, dist_type="L2"):
    traj1 = extract_trajectory(gt, ped_id)[-1, :].reshape((1,4))  # only use last one
    traj2 = extract_trajectory(pred, ped_id)[-1, :].reshape((1,4))
    dist = calc_dists(traj1, traj2, dist_type)
    return dist

def get_mean_fn(mean_type):
    if mean_type == "overall":
        return overall_mean
    if mean_type == "predicted":
        return predicted_mean
    return final_mean

# mean_type = overall, predicted, final
# dist_type = L2, L1
def prediction_error(gt, pred, mean_type, dist_type, lin, linmap):
    peds = np.sort(np.unique(gt[:, 1]))
    npeds = peds.shape[0]
    dist = 0.0
    for i in range(npeds):
        if lin == 0 or (lin == 1 and linmap[peds[i]]) or (lin == 2 and not linmap[peds[i]]):
            dist += get_mean_fn(mean_type)(gt, pred, peds[i], dist_type)
    dist /= npeds
    return dist

num_metrics = 4  # 6
def file_error(gt, pred, lin, linmap):
    return [#prediction_error(gt, pred, "overall", "L2"),
            prediction_error(gt, pred, "predicted", "L2", lin, linmap),
            prediction_error(gt, pred, "final", "L2", lin, linmap),
            #prediction_error(gt, pred, "overall", "L1"),
            prediction_error(gt, pred, "predicted", "L1", lin, linmap),
            prediction_error(gt, pred, "final", "L1", lin, linmap)]

def print_leaderboard(gt_dir, predict_dir, lin):
    datasets = {}
    datasets[".overall"] = np.zeros((num_metrics,))
    overallcount = 0.0
    for folder in os.listdir(gt_dir):
        if "." in folder:
            continue
        datasets[folder] = {}
        datasets[folder][".err"] = np.zeros((num_metrics,))
        count = 0.0
        for fname in os.listdir(gt_dir + folder):
            if not fname.endswith(".txt"):
                continue
            #print "Processing", fname
            gtfile = gt_dir + folder + "/" + fname
            predfile = predict_dir + folder + "/" + fname
            gt = np.loadtxt(gtfile)
            pred = np.loadtxt(predfile)
            #print gt.shape
            #print pred.shape
            fname2 = gt_dir + folder + "/" + fname
            pname = fname2[fname2.rfind("/")+1:fname2.rfind(".")]
            datasets[folder][fname] = file_error(gt, pred, lin, linear_map[pname])
            datasets[folder][".err"] += np.asarray(datasets[folder][fname])
            datasets[".overall"] += np.asarray(datasets[folder][fname])
            count += 1
            overallcount += 1
        datasets[folder][".err"] /= count
    datasets[".overall"] /= overallcount
    return datasets

def predictions_leaderboard(gtfile, predictfile, lin):
    if lin == 0:
        print "All Sequences"
    elif lin == 1:
        print "Linear Sequences"
    else:
        print "Non-Linear Sequences"
    datasets = print_leaderboard(gtfile, predictfile, lin)
    # for folder in datasets:
    #     print folder
    #     for fname in datasets[folder]:
    #         print fname
    #print datasets
    formatstr = '{: <25}{: <12}{: <12}{: <12}{: <12}'
    #formatstr = '{: <25}{: <12}{: <12}{: <12}{: <12}{: <12}{: <12}'
    # print formatstr.format("File", "O-L2", "P-L2", "F-L2", "O-L1", "P-L1", "F-L1")
    print formatstr.format("File", "P-L2", "F-L2", "P-L1", "F-L1")
    for folder in datasets:
        if folder == ".overall":
            es = datasets[folder]
            # print formatstr.format("Overall", round(es[0], 5), round(es[1], 5), round(es[2], 5), round(es[3], 5), round(es[4], 5), round(es[5], 5))
            print formatstr.format("Overall", round(es[0], 5), round(es[1], 5), round(es[2], 5), round(es[3], 5))
            continue
        es = datasets[folder][".err"]
        print formatstr.format(folder + "/", round(es[0], 5), round(es[1], 5), round(es[2], 5), round(es[3], 5))
        for fname in datasets[folder]:
            if fname == ".err":
                continue
            # print "\t",fname
            ffname = " " + fname  #" " + fname if fname != ".err" else folder + "/"
            es = datasets[folder][fname]
            # print ffname
            print formatstr.format(ffname, round(es[0], 5), round(es[1], 5), round(es[2], 5), round(es[3], 5))
            # print formatstr.format(ffname, round(es[0], 5), round(es[1], 5), round(es[2], 5), round(es[3], 5), round(es[4], 5), round(es[5], 5))

predictions_leaderboard("../data/challenges/1/gt/", "../data/challenges/1/predict_sf_ewap/", lin=2)  # 0 is both, 1 is just linear, 2 is just non-linear
predictions_leaderboard("../data/challenges/1/gt/", "../data/challenges/1/predict_sf_attr/", lin=2)  # 0 is both, 1 is just linear, 2 is just non-linear
predictions_leaderboard("../data/challenges/1/gt/", "../data/challenges/1/predict_linear/", lin=2)   # 0 is both, 1 is just linear, 2 is just non-linear
predictions_leaderboard("../data/challenges/1/gt/", "../data/challenges/1/predict_igp/", lin=2)      # 0 is both, 1 is just linear, 2 is just non-linear

# print file_error(
#     np.loadtxt("../data/challenges/1/gt/crowds/crowds_zara01.txt"),
#     np.loadtxt("../data/challenges/1/gt/crowds/crowds_zara01.txt"))
# print file_error(
#     np.loadtxt("../data/challenges/1/gt/crowds/crowds_zara01.txt"),
#     np.loadtxt("../data/challenges/1/predict_lin/crowds/crowds_zara01.txt"))

folders = ["biwi", "crowds", "crowds"]
fnames = ["biwi_eth", "uni_examples", "crowds_zara01"]
for lin in range(3):
    if lin == 0:
        print "All sequences"
    elif lin == 1:
        print "Just Linear sequences"
    elif lin == 2:
        print "Just non-linear sequences"
    else:
        print "NOT SURE"
    for i in range(len(folders)):
        print fnames[i]
        folder = folders[i]
        fname = fnames[i]
        print "SF EWAP"
        print file_error(
            np.loadtxt("../data/challenges/1/gt/" + folder + "/" + fname + ".txt"),
            np.loadtxt("../data/challenges/1/predict_sf_ewap/" + folder + "/" + fname + ".txt"),
            lin, linear_map[fname]
        )
        print "LINEAR"
        print file_error(
            np.loadtxt("../data/challenges/1/gt/" + folder + "/" + fname + ".txt"),
            np.loadtxt("../data/challenges/1/predict_linear/" + folder + "/" + fname + ".txt"),
            lin, linear_map[fname]
        )
        print "SF ATTR"
        print file_error(
            np.loadtxt("../data/challenges/1/gt/" + folder + "/" + fname + ".txt"),
            np.loadtxt("../data/challenges/1/predict_sf_attr/" + folder + "/" + fname + ".txt"),
            lin, linear_map[fname]
        )
        print "IGP"
        print file_error(
            np.loadtxt("../data/challenges/1/gt/" + folder + "/" + fname + ".txt"),
            np.loadtxt("../data/challenges/1/predict_igp/" + folder + "/" + fname + ".txt"),
            lin, linear_map[fname]
        )
        print "NAIVE-LSTM"
        print file_error(
            np.loadtxt("../data/challenges/1/gt/" + folder + "/" + fname + ".txt"),
            np.loadtxt("../data/challenges/1/predict_naive/" + folder + "/" + fname + ".txt"),
            lin, linear_map[fname]
        )

import numpy.polynomial.polynomial as poly

def trajectory_distance(traj):
    total_err = 0.0
    rng = range(20)
    x = traj[:, 2]
    y = traj[:, 3]
    xcoefs = poly.polyfit(rng, x, 1)  # fit line for x
    ycoefs = poly.polyfit(rng, y, 1)  # fit line for y
    for i in range(0, traj.shape[0]):
        pred_x = xcoefs[0] + i*xcoefs[1]
        pred_y = ycoefs[0] + i*ycoefs[1]
        total_err += np.abs(pred_x - traj[i,2]) + np.abs(pred_y - traj[i,3])
    return total_err

linear_map = {}
threshold = 5.0
gt_dir = "../data/challenges/1/gt/"
for folder in os.listdir(gt_dir):
    if "." in folder:
        continue
    for fname in os.listdir(gt_dir + folder):
        if not fname.endswith(".txt"):
            continue
        fname = gt_dir + folder + "/" + fname
        print "Processing", fname
        pname = fname[fname.rfind("/")+1:fname.rfind(".")]
        if not (pname in linear_map):
            linear_map[pname] = {}
        dists = []
        gt = np.loadtxt(fname)
        peds = np.sort(np.unique(gt[:, 1]))
        npeds = peds.shape[0]
        for i in range(npeds):
            traj = extract_trajectory(gt, peds[i])
            dist = trajectory_distance(traj)
            linear_map[pname][peds[i]] = dist < threshold
            dists.append(dist)
        dists = np.asarray(dists)
        plt.hist(dists, normed=True, bins=30, edgecolor='black')
        plt.title(pname)
        plt.savefig("out/linear/" + pname + ".png")
        plt.show()
        plt.close()
print linear_map

def extract_positions(data, frame):
    return data[data[:, 0] == frame, :]

def count_overlap(people, threshold=0.5):
    dists = []
    count = 0
    for i in range(people.shape[0]-1):
        for j in range(i+1, people.shape[0]):
            dist = np.mean(np.sqrt(np.square(people[i,3] - people[j,3]) + np.square(people[i,2] - people[j,2])))
            if dist < threshold:
                count += 1
            dists.append(dist)
    return dists, count

gt_dir = "../data/challenges/1/gt/"
for folder in os.listdir(gt_dir):
    if "." in folder:
        continue
    for fname in os.listdir(gt_dir + folder):
        if not fname.endswith(".txt"):
            continue
        fname = gt_dir + folder + "/" + fname
        print "Processing", fname
        pname = fname[fname.rfind("/")+1:fname.rfind(".")]
        gt = np.loadtxt(fname)
        frames = np.sort(np.unique(gt[:, 0]))
        nframes = frames.shape[0]
        dists = []
        count = 0
        total = 0
        threshold = 0.5
        for i in range(nframes):
            people = extract_positions(gt, frames[i])
            dist, counts = count_overlap(people, threshold=threshold)
            dists.extend(dist)
            count += counts
        total += len(dists)
        dists = np.asarray(dists)
        plt.hist(dists, normed=True, bins=50, edgecolor='black')
        plt.title(pname)
        plt.savefig("out/collisions/" + pname + ".png")
        plt.show()
        plt.close()
        print "Threshold", threshold, "Count", count, "Total", total, "Percentage", round(count*100./total, 3)
        # print dists

gt_dir = "../data/challenges/1/predict_gt/"
models = ["gt", "predict_igp", "predict_linear", "predict_sf_ewap", "predict_sf_attr"]
for folder in os.listdir(gt_dir):
    if "." in folder:
        continue
    for fname in os.listdir(gt_dir + folder):
        if not fname.endswith(".txt"):
            continue
        for modelname in models:
            fname2 = gt_dir.replace("predict_gt", modelname) + folder + "/" + fname
            pname = fname2[fname2.rfind("/")+1:fname2.rfind(".")]
            gt = np.loadtxt(fname2)
            frames = np.sort(np.unique(gt[:, 0]))
            nframes = frames.shape[0]
            dists = []
            count = 0
            total = 0
            threshold = 0.5
            for i in range(nframes):
                people = extract_positions(gt, frames[i])
                dist, counts = count_overlap(people, threshold=threshold)
                dists.extend(dist)
                count += counts
            total += len(dists)
            dists = np.asarray(dists)
            print modelname, pname, round(count*100./total, 3), count, total
            # print "\tPercentage", round(count*100./total, 3), "Count", count, "Total", total
            # print dists

# print file_error(
#     np.loadtxt("../data/challenges/1/gt/crowds/crowds_zara01.txt"),
#     np.loadtxt("../data/challenges/1/gt/crowds/crowds_zara01.txt"))
# print file_error(
#     np.loadtxt("../data/challenges/1/gt/crowds/crowds_zara01.txt"),
#     np.loadtxt("../data/challenges/1/predict_lin/crowds/crowds_zara01.txt"))

folders = ["biwi", "crowds", "crowds"]
fnames = ["biwi_eth", "uni_examples", "crowds_zara01"]
models = ["gt", "predict_linear", "predict_sf_ewap", "predict_sf_attr", "predict_igp", "predict_naive"]
for i in range(len(folders)):
    print fnames[i]
    folder = folders[i]
    fname = fnames[i]
    for model in models:
        gt = np.loadtxt("../data/challenges/1/" + model + "/" + folder + "/" + fname + ".txt")
        frames = np.sort(np.unique(gt[:, 0]))
        nframes = frames.shape[0]
        dists = []
        count = 0
        total = 0
        threshold = 0.5
        for i in range(nframes):
            people = extract_positions(gt, frames[i])
            dist, counts = count_overlap(people, threshold=threshold)
            dists.extend(dist)
            count += counts
        total += len(dists)
        dists = np.asarray(dists)
        print "\t", model, pname, round(count*100./total, 3), count, total
```
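The metrics above are easiest to understand on a tiny synthetic case. The following is an illustrative sketch (made-up trajectories, not part of the original evaluation script) that exercises `predicted_mean` and `final_mean` for a single pedestrian in the same `[frame, ped_id, x, y]` layout:

```
# One pedestrian (id 1) observed for 20 frames; the "prediction" is offset by
# 0.5 in x over the 12 predicted frames (rows 8..19).
frames = np.arange(20)
gt = np.stack([frames, np.ones(20), frames * 0.1, np.zeros(20)], axis=1)
pred = gt.copy()
pred[8:, 2] += 0.5

print(predicted_mean(gt, pred, 1))  # mean L2 error over the predicted part, divided by 12 per the convention above
print(final_mean(gt, pred, 1))      # L2 error of the final position only -> 0.5
```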
# MAT281

## Aplicaciones de la Matemática en la Ingeniería

You can run this jupyter notebook interactively:
[![Binder](../shared/images/jupyter_binder.png)](https://mybinder.org/v2/gh/sebastiandres/mat281_m02_introduccion/master?filepath=02_tabulando_datos/02_tabulando_datos.ipynb)
[![Colab](../shared/images/jupyter_colab.png)](https://colab.research.google.com/github/sebastiandres/mat281_m02_analisis_datos/blob/master//02_tabulando_datos/02_tabulando_datos.ipynb)

## What will we learn?

* Basic knowledge about tabular data.
* Etiquette for tables.
* Pivoting a table.
* Unpivoting a table.

## Why will we learn this?

**1**. It is the most common task in data manipulation. At least 25% of the time.

**2**. Seen on twitter:

* Me: *So, first question, what's the opposite of pivoting a table?*
* Interviewee: *Ehhh, melting a table?*
* Me: *Hired!*

## 1. Tabular data

Tabular data, as the name indicates, are data that can be represented by a table relating their attributes and values.

![wikitable](images/wikitable.gif)

## 1. Tabular data

Are there data that cannot be represented in tabular form? I don't know. In general, I would say data can always be stored as relations between tables; for some more complex problems the issue that emerges is rather the efficiency of searching or storing the information.

## 1. Tabular data

Typically, certain columns are the "identifier" columns, for which there *should* be a single row in the table. The other columns are data columns, to which more information can be added.

|semestre | rut_estudiante|curso|prueba|nota|
|-----------|----|--------|----|----|
|2018-1S|15000000-6|mat281|Certamen_1|100|
|2016-1S|15000000-6|mat281|Certamen_1|100|
|2018-1S|18000000-9|mat281|Certamen_1|100|
|2018-1S|15000000-6|fis120|Certamen_2|100|

Which columns are identifiers in the example above? Which are the associated values?

## 1. Tabular data

Consider the following example:

|semestre | rut_estudiante|curso|prueba|nota|
|-----------|----|--------|----|----|
|2018-1S|15000000-6|mat281|Certamen_1|100|
|2016-1S|15000000-6|mat281|Certamen_1|100|
|2018-1S|18000000-9|mat281|Certamen_1|100|
|2018-1S|15000000-6|fis120|Certamen_2|100|
|2018-1S|15000000-6|mat281|Certamen_1|10|
|2016-1S|15000000-6|mat281|Certamen_1|-10|

Is the table correct? In the example above there are conflicts of information between different rows of the table, which must be resolved when processing the data.

## 1. Tabular data

Consider the following example:

|semestre | nombre_estudiante|curso|prueba|nota|
|-----------|----|--------|----|----|
|2018-1S|Sebastian Flores|mat281|Certamen_1|100|
|2016-1S|Sebastian Andrés Flores |mat281|Certamen_1|100|
|2018-1S|Sebastián Flores |mat281|Certamen_1|100|
|2018-1S|Sebastian A. Flores B. |fis120|Certamen_2|100|

Is the table correct? In the example above, one of the columns that should act as the student identifier is not normalised and does not allow a unique identification of the student.

## 1. Tabular data

### Wide format versus long format

For example, the [Zoo Data Set](http://archive.ics.uci.edu/ml/datasets/zoo) presents the characteristics of various animals, of which we show the first 5 columns.

|animal_name|hair|feathers|eggs|milk|
|-----------|----|--------|----|----|
|antelope|1|0|0|1|
|bear|1|0|0|1|
|buffalo|1|0|0|1|
|catfish|0|0|1|0|

The table presented this way is in "wide format", that is, the values extend across the columns.

## 1. Tabular data

### Wide format versus long format

It would be possible to represent the same content in "long format", that is, where the same values are laid out across rows:

|animal_name|characteristic|value|
|-----------|----|--------|
|antelope|hair |1|
|antelope|feathers|0|
|antelope|eggs|0|
|antelope|milk|1|
|...|...|...|
|catfish|hair |0|
|catfish|feathers|0|
|catfish|eggs|1|
|catfish|milk|0|

## 1. Tabular data

### Wide format versus long format

![wide_and_long](images/wide_and_long.png)

## 2. Etiquette for tabular data

Before moving on to manipulating tables, let us talk about a rarely mentioned topic. How should we format the tables we share? Is there an *etiquette* for tabular data?

The format and design of a table are crucial: done well, the data are easy to review and compare; done badly, they hinder and obscure the understanding of the information. Just as with visualisations, the goal of good table formatting is for the information to be consumed correctly and quickly by an external reader, without room for misinterpretation.

## 2. Etiquette for tabular data

The basic rules to consider:

1. The font of all numbers should be uniform and allow comparisons.
2. Columns should use a thousands separator and, as far as possible, the same number of decimals. The fewer, the better.

## 2. Etiquette for tabular data

3. Mention the units in the column name when relevant.
4. Numeric data should be right-aligned.

## 2. Etiquette for tabular data

5. Text data should be left-aligned.
6. Column titles should be aligned with their data.

## 2. Etiquette for tabular data

7. Use colour appropriately and sparingly.
8. Provide aggregated values as needed.

## 2. Etiquette for tabular data

Example of bad formatting:

![ejemplo_1](images/2_tabla_mal_formato.png)

## 2. Etiquette for tabular data

Example of somewhat better formatting:

![ejemplo_1](images/2_tabla_mejor_formato.png)

## 2. Etiquette for tabular data

Example of the final formatting:

![ejemplo_1](images/2_tabla_mucho_mejor_formato.png)

## 3. Pivoting a table

Pivoting a table is the transition from "long format" to "wide format". It is typically done to compare the values obtained for a particular record, or to use basic visualisation tools that require that format.

```
import pandas as pd
import os

df = pd.read_csv(os.path.join("data","terremotos.csv"), sep=",")
df["Pais"] = df["Pais"].str.strip()
df.head()

df.describe(include="all")
```

## 3. Pivoting a table

The previous table does not have well-defined identifier columns, since in a given year there could be more than one earthquake:

```
df.groupby(["Año","Pais"]).count()
```

If the table has a single record per combination of identifier columns, pivoting poses no problem. If there is more than one record per combination of identifier columns, an aggregation function must be used to select a representative value.

Let's see what the documentation says:

```
df.pivot_table?
```

Let's try it. How about pivoting to count the number of earthquakes per year?
```
df.pivot_table(index="Año", values="Pais", aggfunc=pd.DataFrame.count)#.T
```

What was the largest magnitude in each year?

```
df.pivot_table(index="Año", values="Magnitud", aggfunc=pd.np.max)#.T
```

What was the largest magnitude in each year, in each country?

```
df.pivot_table(index=["Año", "Pais"], values="Magnitud", aggfunc=pd.np.max)
```

What were the earthquakes like, year by year, in each country?

```
df.pivot_table(index="Pais", columns="Año", values="Magnitud", aggfunc=pd.np.max)

df.pivot_table(index="Pais", columns="Año", values="Magnitud", aggfunc=pd.np.max, fill_value="")
```

## 3. Pivoting a table

In general, pivoting a table is not particularly complicated, but it requires knowing exactly which question you want to answer, since, as we saw, several different aggregated tables can be generated from the same "long" table. And, as the saying goes, practice makes perfect.

## 4. Unpivoting a table

**Disclaimer**: I do not know a better Spanish term for this. In English there is no agreement either; I have seen *melt*, *un-pivot* and *reverse-pivot*, among others. So far nobody has corrected me with a better translation.

Unpivoting a table means going from "wide format" to "long format". Unpivoting is typically done to add new columns to the table, or to put it in a format suitable for analysis with more advanced visualisation tools.

## 4. Unpivoting a table

Typically there are 2 options:

1. The value indicated by the column is unique, and you only need to define the columns correctly.
2. The value indicated by the column is not unique or requires additional processing, and a deeper iteration is required.

## 4. Unpivoting a table

### 4.1 Unique value: define the necessary columns

```
import pandas as pd

columns = ["sala","Lu-8:00","Lu-9:00","Lu-10:00","Ma-8:00","Ma-9:00","Ma-10:00"]
data = [
        ["C201","mat1","mat1", "","","",""],
        ["C202", "", "", "","mat1","mat1", ""],
        ["C203","fis1","fis1","fis1","fis1","fis1","fis1"],
       ]
df = pd.DataFrame(data=data, columns=columns)
df
```

The documentation of melt is quite explicit, although not very friendly:

```
df.melt?
```

Let's try it:

```
df

df.melt(id_vars=["sala"])  # identifier columns

df.melt(id_vars=["sala"],      # identifier columns
        var_name="dia-hora",   # new variable
        value_name="curso")    # name of the column holding the value of the new variable

df.melt(id_vars=["sala"],                             # identifier columns
        value_vars=["Lu-8:00","Lu-9:00","Lu-10:00"],  # columns to consider
        var_name="dia-hora",                          # new variable
        value_name="curso")                           # name of the column holding the value of the new variable
```

After unpivoting, a bit of clean-up may be needed to avoid the empty values.

```
# Unpivot the dataframe, renaming the indexing column and the value column
df_melt = df.melt(id_vars=["sala"], var_name="dia-hora", value_name="curso")
# Remove rows with no content and sort by room name and day-time.
df_melt[df_melt.curso!=""].sort_values(["sala","dia-hora"])
```
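As a quick consistency check, the unpivot above can be reversed with `pivot`, recovering the original wide layout. This is a small illustrative sketch reusing the `df_melt` defined above; it is not part of the original lecture:

```
# Round trip: the long table produced by melt can be pivoted back to the wide layout,
# because each (sala, dia-hora) pair appears exactly once.
df_melt.pivot(index="sala", columns="dia-hora", values="curso")
```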
## 4. Unpivoting a table

### 4.1 Unique value: define the necessary columns

Let's try it with a more complex example.

```
import pandas as pd

columns = ["sala","dia","08:00","09:00","10:00"]
data = [
        ["C201","Lu", "mat1","mat1", ""],
        ["C201","Ma", "","",""],
        ["C202","Lu", "","",""],
        ["C202","Ma", "mat1","mat1", ""],
        ["C203","Lu", "fis1","fis1","fis1"],
        ["C203","Ma", "fis1","fis1","fis1"],
       ]
df = pd.DataFrame(data=data, columns=columns)
df

# Unpivot the table incorrectly
df.melt(id_vars=["sala"], var_name="hora", value_name="curso")

# Unpivot the table correctly
df.melt(id_vars=["sala", "dia"], var_name="hora", value_name="curso")

# Unpivot the table correctly
df_melt = df.melt(id_vars=["sala", "dia"], var_name="hora", value_name="curso")
df_melt[df_melt.curso!=""].sort_values(["sala","dia","hora"])
```

## 4. Unpivoting a table

### 4.2 Non-unique relations

Consider the following example:

```
import pandas as pd

columns = ["sala","curso","Lu","Ma","hora"]
data = [
        ["C201","mat1","X","","8:00-10:00"],
        ["C202","mat1","","X","8:00-10:00"],
        ["C203","fis1","X","X","8:00-11:00"],
       ]
df = pd.DataFrame(data=data, columns=columns)
df
```

How could we unpivot the previous table? There is no single way; depending on the complexity of the operation, some solutions may be easier than others.

#### Idea 1: Unpivot manually and build a new dataframe.

* **Advantage**: when it is possible, it is a direct and fast solution.
* **Disadvantage**: it requires explicit programming of the task and is not reusable.

```
# Get Monday ("Lu")
df_Lu = df.loc[df.Lu=="X", ["sala","curso","hora"]]
df_Lu["dia"] = "Lu"
df_Lu

# Get Tuesday ("Ma")
df_Ma = df.loc[df.Ma=="X", ["sala","curso","hora"]]
df_Ma["dia"] = "Ma"
df_Ma

# Concatenate
pd.concat([df_Lu,df_Ma])
```

#### Idea 2: Iterate over the rows and generate content for a new dataframe.

* **Advantage**: in general, easy to code.
* **Disadvantage**: it can be slow and is inefficient.

```
# How to iterate over each row of the dataframe
for i, row in df.iterrows():
    # Process each row
    print(row.sala, row.curso, row.Lu, row.Ma, row.hora)

my_columns = ["sala","curso","dia","hora"]
my_data = []
for i, df_row in df.iterrows():
    # Process each row
    if df_row.Lu=="X":
        my_row = [df_row.sala, df_row.curso, "Lu", df_row.hora]
        my_data.append(my_row)
    if df_row.Ma=="X":
        my_row = [df_row.sala, df_row.curso, "Ma", df_row.hora]
        my_data.append(my_row)
new_df = pd.DataFrame(data=my_data, columns=my_columns)
new_df

my_columns = ["sala","curso","dia","hora"]
my_data = []
for i, df_row in df.iterrows():
    # Process each row
    for col_aux in ["Lu","Ma"]:
        if df_row[col_aux]=="X":
            my_row = [df_row.sala, df_row.curso, col_aux, df_row.hora]
            my_data.append(my_row)
new_df = pd.DataFrame(data=my_data, columns=my_columns)
new_df
```

## References

* https://medium.com/mission-log/design-better-data-tables-430a30a00d8c
* https://medium.com/@enricobergamini/creating-non-numeric-pivot-tables-with-python-pandas-7aa9dfd788a7
* https://nikgrozev.com/2015/07/01/reshaping-in-pandas-pivot-pivot-table-stack-and-unstack-explained-with-pictures/
References:

[1] [Reinforcement Learning II: Q-Learning with Tables and Neural Networks (Chinese)](https://blog.csdn.net/qq_32690999/article/details/78996381)

[2] [Keras Deep Learning Series II: Neural Network and BP Algorithm (Chinese)](https://blog.csdn.net/qq_32690999/article/details/78605371)

[3] [TensorFlow API](https://www.tensorflow.org/versions/r1.11/api_docs/python/tf/reset_default_graph)

# 1. Q-Learning

Q(s,a) is an estimate of the reward/value of "taking action a in state s". We use the Q values to decide the agent's next action and, as the agent keeps acting, we use the Bellman equation to learn/update the Q values, so that the agent learns to behave better and achieve better results.

- Bellman equation

$$Q(s,a)=\text{Reward}+\gamma \max_{a'} Q(s',a')$$

Meaning: the Q value of a given state and action equals the current reward plus the discount factor times the largest value attainable in the state s' reached after taking the next action a'.

- Q-value update rule derived from the Bellman equation

To estimate Q values via the Bellman equation we adopt the idea of "sampling": **the essence of sampling is to collect fragments of information about something unknown and piece them together bit by bit until the full picture emerges.** In other words, every time we complete an action we have taken one sample:

$$\text{sample}=\text{Reward}+\gamma \max_{a'} Q(s',a')$$

We then fold the new information from this sample into the old Q value (the size of the update is controlled by the learning rate):

$$Q(s,a) = Q(s,a) + lr\,(r + y\,\max_a Q(s1,a) - Q(s,a))$$

Meaning: new Q value = old Q value + learning rate lr * (reward + discount factor * maximum Q value attainable in the new state s1 - old Q value).

We keep repeating this act-sample-update loop until the agent performs satisfactorily!

# 2. Neural Network Based Q-Learning

When both s and a are finite sets, the number of state+action combinations (s,a) is finite, so we can simply "memorise" all the Q values, for example in a table whose columns are actions and whose rows are states; each update then just overwrites the corresponding cell.

|Q(s,a)|action-1|action-2|...|
|--|--|--|--|
|state-1|Q(state-1,action-1)|Q(state-1,action-2)|...|
|state-2|Q(state-2,action-1)|Q(state-2,action-2)|...|
|...|...|...|...|

In the more general case, however, the state space s is very large or even infinite. For example, if the FrozenLake environment no longer used grid squares (characters) as its unit of movement but screen pixels instead, the number of possible states would already be enormous.

That means the Q table would also be enormous. The most immediate problems of a huge Q table are:

- (1) you need a very large number of training episodes to make sure every state+action combination is sampled often enough to learn sufficiently complete and correct information, which is extremely time-consuming;
- (2) you need a lot of memory to store the Q table.

Yet we still need to estimate Q(s,a). So what can we do?

## Q-Learning based on a neural network

The input layer of a neural network accepts vector inputs, so we can feed the agent's current state into the network as a one-hot encoded vector and let the output layer produce the vector of value estimates for each action. In other words, the output is $[Q(s_i,a_1),Q(s_i,a_2),...,Q(s_i,a_n)]$.

![pic1](NeuralNetworkBasedQLearning-pic1.png)

Through training we hope the network outputs all Q values of a given state as accurately as possible; as before, we then execute the action with the largest Q value.

The algorithm for training the network is still **backpropagation** (see reference [2] for details; only a brief account is given here). The error arises between **the estimated Q vector V1 for the current state s and the Q vector V2 of the new state s' reached after taking action a (chosen according to the policy), specifically in the entry for action a. These are exactly the quantities that table-based Q-Learning uses when it updates Q(s,a) with the Bellman equation.**

***

- Example

Suppose we are in a 2*2 grid environment (four squares (0,0), (0,1), (1,0), (1,1)) and the agent is at position (0,0), i.e. s=(0,0).

We first encode s=(0,0) as the vector $V_s=(1,0,0,0)$ and feed it to the network, which outputs the Q-value vector for the current state s: $$V_{qs}=(1.8,1.4,-1.2,0.2)$$ whose four components are the Q values of the actions [up, down, left, right] in state s.

Next, following our action policy, we pick from $V_{qs}$ the action a with the largest Q value, i.e. we go [up].

The grid environment returns reward=2 and the next state s'=(0,1). We encode the new state as $V_{s'}=(0,1,0,0)$, feed it to the network, and obtain the output $$V'_{qs}=(2.5,\,0.5,\,1.2,\,0.2)$$

Following the Bellman equation we take the largest value Qmax=2.5 and compute $$Q_{processed}=\text{reward}+\gamma \cdot Qmax=2+\gamma \cdot 2.5$$ With $\gamma=0.8$ this gives $Q_{processed}=2+2=4$.

We then set the network's target vector to $V_{qs}$ with the entry for q(s, 'up') replaced by $Q_{processed}$, i.e. $$V_{goal}=(4,1.4,-1.2,0.2)$$

Now we can compute the difference between the estimated vector $V_{qs}$ and the target vector $V_{goal}$, exactly as a supervised network computes the difference between its prediction $\hat{y}$ and the label $y$.

Finally, the network parameters are trained in the usual way, with a forward pass followed by gradient-descent backpropagation, until the agent performs reasonably well.

***

## Hyperparameters

- Learning rate: lr
- Discount factor: $\gamma$
- Random action probability: e

```
# Q-Network Learning on FrozenLake
# Q-network learning
import gym
import numpy as np
import random
import tensorflow as tf
import matplotlib.pyplot as plt
%matplotlib inline

# Load the environment
env = gym.make('FrozenLake-v0')

# Q-network approach
# Architecture:
#   input layer:  1*16
#   no hidden layer
#   output layer: 16*4

# Reset the default TensorFlow graph
tf.reset_default_graph()

# The lines below build the feed-forward part of the network, which is used to choose actions
inputs1 = tf.placeholder(shape=[1,16],dtype=tf.float32)  # input vector; shape=[1,16] is a 1*16 vector with one entry per square/state of the FrozenLake grid
W = tf.Variable(tf.random_uniform([16,4],0,0.01))  # weight matrix (the input keeps receiving new values, so it is held in a placeholder; trainable parameters such as W and b are defined as Variables, because only Variable objects are optimised during training)
Qout = tf.matmul(inputs1,W)  # input vector * input-to-output weight matrix
predict = tf.argmax(Qout,1)  # predicted (greedy) action

# The lines below compute the loss: the sum of squared differences between the predicted and target Q values.
nextQ = tf.placeholder(shape=[1,4],dtype=tf.float32)  # the updated (target) Q-value vector
loss = tf.reduce_sum(tf.square(nextQ - Qout))  # sum over the squared difference vector (definition of the loss)
trainer = tf.train.GradientDescentOptimizer(learning_rate=0.1)  # train with gradient descent, learning rate 0.1
updateModel = trainer.minimize(loss)  # training op (the trainer minimises the loss); minimize automatically updates the trainable Variables

# Train the network
init = tf.initializers.global_variables()  # global variable initialiser

# Set learning parameters
y = .99   # discount factor
e = 0.1   # probability of a random action
num_episodes = 2000

# Lists to record the total reward and total steps per episode.
jList = []
rList = []

# Start the session; a session is required to run operations and evaluate tensors
with tf.Session() as sess:
    sess.run(init)  # actually run the initialisation
    for i in range(num_episodes):
        # Reset the environment and get the initial state
        s = env.reset()
        rAll = 0
        d = False
        j = 0
        # The Q-network
        while j < 99:
            j+=1
            # Choose an action greedily from the Q-network output (with some probability of a random action)
            a,allQ = sess.run([predict,Qout],feed_dict={inputs1:np.identity(16)[s:s+1]})
            # Use a random number to decide whether to act randomly
            if np.random.rand(1) < e:
                a[0] = env.action_space.sample()
            # Get the new state, the reward, and whether the episode has finished
            s1,r,d,_ = env.step(a[0])
            # Obtain the Q values of the new state by feeding its state vector through the network.
            Q1 = sess.run(Qout,feed_dict={inputs1:np.identity(16)[s1:s1+1]})
            # Obtain the maximum Q value
            # Recall: Q(s,a) = Q(s,a) + lr*(r + y*max_a(Q(s1,a)) - Q(s,a))
            maxQ1 = np.max(Q1)
            targetQ = allQ
            targetQ[0,a[0]] = r + y*maxQ1
            # Train the network using the target and predicted Q values
            _,W1 = sess.run([updateModel,W],feed_dict={inputs1:np.identity(16)[s:s+1],nextQ:targetQ})
            rAll += r
            s = s1
            if d == True:
                # Gradually reduce the chance of random actions as training progresses
                e = 1./((i/50) + 10)
                break
        jList.append(j)
        rList.append(rAll)

print("Percent of successful episodes: " + str(sum(rList)/num_episodes) + "%")
```

# Example 2: [CartPole-v0](https://gym.openai.com/envs/CartPole-v1/)

Having finished the FrozenLake challenge, let us try implementing NN-based Q-Learning in the classic CartPole environment.

<video src="http://s3-us-west-2.amazonaws.com/rl-gym-doc/cartpole-no-reset.mp4" controls="controls"> </video>

A fairly typical, simple environment.

- Goal: move the black cart left and right to keep the pole attached to it from falling over (the episode ends as soon as the pole is more than 15 degrees from vertical, or the cart has moved more than 2.4 units from the centre).
- Reward: after each action / at each timestep, if the pole is still reasonably upright (within 15 degrees of vertical), a reward of +1 is received.
- Actions: left, right
- Observation:

```
Type: Box(4)
Num    Observation            Min     Max
0      Cart Position          -4.8    4.8
1      Cart Velocity          -Inf    Inf
2      Pole Angle             -24°    24°
3      Pole Velocity At Tip   -Inf    Inf
```

```
# Q-Network Learning on CartPole-v0
# Q-network learning
import gym
import numpy as np
import random
import tensorflow as tf
import matplotlib.pyplot as plt
%matplotlib inline

# Load the environment
env = gym.make('CartPole-v0')

# Q-network approach
# Architecture:
#   input layer:  1*4
#   no hidden layer
#   output layer: 4*2
state_variable_num=4  # cart position, cart velocity, pole angle, pole velocity at tip
action_num=2          # left, right

# Reset the default TensorFlow graph
tf.reset_default_graph()

# The lines below build the feed-forward part of the network, which is used to choose actions
inputs1 = tf.placeholder(shape=[1,state_variable_num],dtype=tf.float32)  # input vector; shape=[1,4]: the four CartPole observation variables
W = tf.Variable(tf.random_uniform([state_variable_num,action_num],0,0.01))  # weight matrix (placeholder for inputs, Variable for trainable parameters)
Qout = tf.matmul(inputs1,W)  # input vector * input-to-output weight matrix
predict = tf.argmax(Qout,1)  # predicted (greedy) action

# The lines below compute the loss: the sum of squared differences between the predicted and target Q values.
nextQ = tf.placeholder(shape=[1,action_num],dtype=tf.float32)  # the updated (target) Q-value vector
loss = tf.reduce_sum(tf.square(nextQ - Qout))  # sum over the squared difference vector (definition of the loss)
trainer = tf.train.GradientDescentOptimizer(learning_rate=0.1)  # train with gradient descent, learning rate 0.1
updateModel = trainer.minimize(loss)  # training op (the trainer minimises the loss)

# Train the network
init = tf.initializers.global_variables()  # global variable initialiser

# Set learning parameters
y = .99   # discount factor
e = 0.1   # probability of a random action
num_episodes = 10

# Lists to record the total reward and total steps per episode.
jList = []
rList = []

# Start the session; a session is required to run operations and evaluate tensors
with tf.Session() as sess:
    sess.run(init)  # actually run the initialisation
    for i in range(num_episodes):
        # Reset the environment and get the initial state
        s = env.reset()
        rAll = 0
        d = False
        j = 0
        # The Q-network
        while j < 99:
            j+=1
            # Choose an action greedily from the Q-network output (with some probability of a random action)
            a,allQ = sess.run([predict,Qout],feed_dict={inputs1:np.reshape(s,[1,state_variable_num])})  # the state is no longer one-hot encoded; the four observation variable values are used directly
            # Use a random number to decide whether to act randomly
            if np.random.rand(1) < e:
                a[0] = env.action_space.sample()
            # Get the new state, the reward, and whether the episode has finished
            s1,r,d,_ = env.step(a[0])
            # Obtain the Q values of the new state by feeding its state vector through the network.
            Q1 = sess.run(Qout,feed_dict={inputs1:np.reshape(s1,[1,state_variable_num])})
            # Obtain the maximum Q value
            # Recall: Q(s,a) = Q(s,a) + lr*(r + y*max_a(Q(s1,a)) - Q(s,a))
            maxQ1 = np.max(Q1)
            targetQ = allQ
            targetQ[0,a[0]] = r + y*maxQ1
            # Train the network using the target and predicted Q values
            _,W1 = sess.run([updateModel,W],feed_dict={inputs1:np.reshape(s,[1,state_variable_num]),nextQ:targetQ})
            rAll += r
            s = s1
            if d == True:
                # Gradually reduce the chance of random actions as training progresses
                e = 1./((i/50) + 10)
                break
        jList.append(j)
        rList.append(rAll)

print('Learned weight matrix W:')
print(W1)
print(rList)
print("Percent of successful episodes: " + str(sum(rList)/num_episodes) + "%")

# Compare the performance of different agents
def testAgent(W1=None,test_episodes=30):
    jList = []
    rList = []
    trained=True
    # random agent
    if W1 is None:
        trained=False
    for i in range(test_episodes):
        s = env.reset()
        rAll = 0
        d = False
        j = 0
        while j<99:
            j+=1
            if trained:
                # Greedily choose the best action from the network (the random-action noise used during training is removed here)
                # print(np.dot(np.reshape(s,[1,4]),W1))
                a = np.argmax(np.dot(np.reshape(s,[1,4]),W1))
            else:
                a=env.action_space.sample()
            s,r,d,_=env.step(a)
            rAll+=r
            # Check whether the episode has ended
            if d == True:
                break
        jList.append(j)
        rList.append(rAll)
    return jList,rList

print('Testing agent performance with the learned Q values:')
print()
print('Average score of a random/untrained agent:')
randomAgentResult=testAgent()
print(randomAgentResult[1])
print(sum(randomAgentResult[1])/len(randomAgentResult[1]))
print('Average score of the Q-Learning agent:')
QLearningAgentResult=testAgent(W1)
print(QLearningAgentResult[1])
print(sum(QLearningAgentResult[1])/len(QLearningAgentResult[1]))
```
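For contrast with the network-based implementations above, here is a minimal NumPy sketch of the *tabular* update rule from section 1, run on FrozenLake. It is an illustrative sketch with made-up hyperparameters, not part of the original notebook:

```
import gym
import numpy as np

env = gym.make('FrozenLake-v0')
Q = np.zeros((env.observation_space.n, env.action_space.n))  # Q table: rows = states, columns = actions
lr, y = 0.8, 0.95  # made-up learning rate and discount factor

for i in range(2000):
    s = env.reset()
    d = False
    while not d:
        # Greedy action with decaying random noise for exploration
        a = np.argmax(Q[s, :] + np.random.randn(env.action_space.n) / (i + 1))
        s1, r, d, _ = env.step(a)
        # Tabular Bellman update: Q(s,a) += lr * (r + y * max_a' Q(s1,a') - Q(s,a))
        Q[s, a] += lr * (r + y * np.max(Q[s1, :]) - Q[s, a])
        s = s1
```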
```
from __future__ import print_function
from sympy import *
from sympy.abc import *
from sympy.galgebra.ga import *
import numpy as np
from numpy import linalg as LA
init_printing()
```

# Operational intensity of differential operators

We consider differential operators applied to a vector $u$ at a given point in 3D, with a 1D stencil of size $k$ (number of points in the stencil) for every order. The subindex $i$ represents the dimension number: $1$ for $z$, $2$ for $x$ and $3$ for $y$.

First order: $ \frac{d u}{dx_i} $

Second order: $ \frac{d^2 u}{dx_i^2} $

Second-order cross derivative: $ \frac{d^2 u}{dx_i dx_j} $

```
# Arithmetic operations
k = symbols('k')
s = symbols('s')

# 1D stencil
#          multiplications + additions
AI_dxi   = k + k - 1
AI_dxxi  = k + 1 + k - 1
AI_dxxij = 2*k + 2*k - 1

# square stencil (all uses the same stencil mask)
#           multiplications + additions
AI_dxis   = k**2 + k**2 - 1
AI_dxxis  = k**2 + k**2 - 1
AI_dxxijs = k**2 + k**2 - 1

# I/O operations
# load
IO_dxi = k
IO_dxxi = k
IO_dxxij = 2*k
IO_square = k**2

# Operational intensity in single precision
print(AI_dxi/(4*IO_dxi))
print(AI_dxxi/(4*IO_dxxi))
print(AI_dxxij/(4*IO_dxxij))
print(AI_dxis/(4*IO_square))
print(AI_dxxis/(4*IO_square))
print(AI_dxxijs/(4*IO_square))

OI_dxi = lambdify(k,AI_dxi/(4*IO_dxi))
OI_dxxi = lambdify(k,AI_dxxi/(4*IO_dxxi))
OI_dxxij = lambdify(k,AI_dxxij/(4*IO_dxxij))
OI_dxis = lambdify(k,AI_dxis/(4*IO_dxxij))
OI_dxxis = lambdify(k,AI_dxxis/(4*IO_dxxij))
OI_dxxijs = lambdify(k,AI_dxxijs/(4*IO_dxxij))
```

# Operational intensity of wave equations

We now consider geophysical wave equations to obtain the theoretical expression of the operational intensity. We write directly the expression of a single time step as a function of differential operators. An operation on a wavefield is counted only once, as we consider the minimum of arithmetic operations required.
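Before writing out the wave equations, a quick numerical sanity check of the 1D stencil expressions above can be useful. This is a small illustrative sketch (assuming 4-byte single-precision values, as in the `4*IO` factors above), not part of the original derivation:

```
# Second derivative with a k-point 1D stencil:
# 2*k flops (k+1 multiplications, k-1 additions) against k loaded values.
kk = 5
flops = (kk + 1) + (kk - 1)
bytes_loaded = 4 * kk                     # single precision: 4 bytes per value
print(float(flops) / bytes_loaded)        # 0.5 flops per byte, independent of kk
```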
## Acoustic isotropic $ u(x,y,z,t+dt) = dt^2 v^2(x,y,z) ( 2 u(x,y,z,t) + u(x,y,z,t-dt) + \nabla^2 u(x,y,z,t) +q ) $ ## VTI $ p(x,y,z,t+dt) = dt^2 v^2(x,y,z) \left( 2 p(x,y,z,t) + p(x,y,z,t-dt) +(1+2\epsilon)(\frac{d^2 p(x,t)}{dx^2}+\frac{d^2 p(x,t)}{dyx^2}) + \sqrt{(1+2\delta)} \frac{d^2 r(x,t)}{dz^2} + q \right) $ $ r(x,y,z,t+dt) = dt^2 v^2(x,y,z) \left( 2 r(x,y,z,t) + r(x,y,z,t-dt) +\sqrt{(1+2\delta)}(\frac{d^2 p(x,t)}{dx^2}+ \frac{d^2 p(x,t)}{dy^2}) + \frac{d^2 r(x,t)}{dz^2} + q \right) $ ## TTI $ p(x,y,z,t+dt) = dt^2 v^2(x,y,z) \left( 2 p(x,y,z,t) + p(x,y,z,t-dt) + (1+2\epsilon) (G_{\bar{x}\bar{x}} + G_{\bar{y}\bar{y}}) p(x,y,z,t) + \sqrt{(1+2\delta)} G_{\bar{z}\bar{z}} r(x,y,z,t) + q \right) $ $ r(x,y,z,t+dt) = dt^2 v^2(x,y,z) \left( 2 r(x,y,z,t) + r(x,y,z,t-dt) + \sqrt{(1+2\delta)}(G_{\bar{x}\bar{x}} + G_{\bar{y}\bar{y}}) p(x,y,z,t) + G_{\bar{z}\bar{z}} r(x,y,z) +q \right) $ where $ \begin{cases} G_{\bar{x}\bar{x}} & = cos(\phi)^2 cos(\theta)^2 \frac{d^2}{dx^2} +sin(\phi)^2 cos(\theta)^2 \frac{d^2}{dy^2}+ sin(\theta)^2 \frac{d^2}{dz^2} + sin(2\phi) cos(\theta)^2 \frac{d^2}{dx dy} - sin(\phi) sin(2\theta) \frac{d^2}{dy dz} -cos(\phi) sin(2\theta) \frac{d^2}{dx dz} \\ G_{\bar{y}\bar{y}} & = sin(\phi)^2 \frac{d^2}{dx^2} +cos(\phi)^2 \frac{d^2}{dy^2} - sin(2\phi)^2 \frac{d^2}{dx dy}\\ G_{\bar{z}\bar{z}} & = cos(\phi)^2 sin(\theta)^2 \frac{d^2}{dx^2} +sin(\phi)^2 sin(\theta)^2 \frac{d^2}{dy^2}+ cos(\theta)^2 \frac{d^2}{dz^2} + sin(2\phi) sin(\theta)^2 \frac{d^2}{dx dy} + sin(\phi) sin(2\theta) \frac{d^2}{dy dz} +cos(\phi) sin(2\theta) \frac{d^2}{dx dz} \\ \end{cases} $ ``` # Arithmetic # dxi dxxi dxxij multiplications additions duplicates AI_acou = 0*AI_dxi + 3*AI_dxxi + 0*AI_dxxij + 3 + 5 - 2 * 2 AI_vti = 2 * ( 0*AI_dxi + 3*AI_dxxi + 0*AI_dxxij + 5 + 5 - 2 ) AI_tti = 2 * ( 0*AI_dxi + 3*AI_dxxi + 3*AI_dxxij + 44 + 17 - 8 ) AI_acoums = 0*AI_dxi + 3*s*AI_dxxi + 0*AI_dxxij + 3*s + 5*s - 2 * 2 *s AI_vtims = 2 * ( 0*AI_dxi + 3*s*AI_dxxi + 0*AI_dxxij + 5*s + 5*s - 2*s ) AI_ttims = 2 * ( 0*AI_dxi + 3*s*AI_dxxi + 3*s*AI_dxxij + 44*s + 17*s - 8*s ) AI_acous = 0*AI_dxis + 3*AI_dxxis + 0*AI_dxxijs + 3 + 5 - 2 * 2 AI_vtis = 2 * ( 0*AI_dxis + 3*AI_dxxis + 0*AI_dxxijs + 5 + 5 - 2 * 2 ) AI_ttis = 2 * ( 0*AI_dxis + 3*AI_dxxis + 3*AI_dxxijs + 44 + 17 - 3*k**2 ) # I/O operations (we load a point once only) # dxi dxxi dxxij duplicate other load/write IO_acou = 0*IO_dxi + 3*IO_dxxi + 0*IO_dxxij - 2 + 3 IO_vti = 2 * ( 0*IO_dxi + 3*IO_dxxi + 0*IO_dxxij - 2 + 2 ) + 3 IO_tti = 2 * ( 0*IO_dxi + 3*IO_dxxi + 3*IO_dxxij - 3*k +2 + 4 ) + 7 IO_acoums = 0*IO_dxi + 3*s*IO_dxxi + 0*IO_dxxij - 2*s + 3*s+1 IO_vtims = 2 * ( 0*IO_dxi + 3*s*IO_dxxi + 0*IO_dxxij - 2*s + 2*s ) + 3 IO_ttims = 2 * ( 0*IO_dxi + 3*s*IO_dxxi + 3*s*IO_dxxij - s*(3*k +2) + 4*s ) + 7 IO_acous = 0*IO_square + 3*IO_square + 0*IO_square - 2 + 3 IO_vtis = 2 * ( 0*IO_square + 3*IO_square + 0*IO_square - 2 + 2 ) + 3 IO_ttis = 2 * ( 0*IO_square + 3*IO_square + 3*IO_square - 3*IO_square+ 4 ) + 7 print(simplify(AI_acou/(4*IO_acou))) print(simplify(AI_vti/(4*IO_vti))) print(simplify(AI_tti/(4*IO_tti))) print(simplify(AI_acoums/(4*IO_acoums))) print(simplify(AI_vtims/(4*IO_vtims))) print(simplify(AI_ttims/(4*IO_ttims))) print(simplify(AI_acous/(4*IO_acous))) print(simplify(AI_vtis/(4*IO_vtis))) print(simplify(AI_ttis/(4*IO_ttis))) OI_acou = lambdify(k,AI_acou/(4*IO_acou)) OI_vti = lambdify(k,AI_vti/(4*IO_vti)) OI_tti = lambdify(k,AI_tti/(4*IO_tti)) OI_acoums = lambdify((k,s),AI_acoums/(4*IO_acoums)) OI_vtims = lambdify((k,s),AI_vtims/(4*IO_vtims)) OI_ttims = 
lambdify((k,s),AI_ttims/(4*IO_ttims)) OI_acous = lambdify(k,AI_acous/(4*IO_acous)) OI_vtis = lambdify(k,AI_vtis/(4*IO_vtis)) OI_ttis = lambdify(k,AI_ttis/(4*IO_ttis)) print(limit(OI_acou(k),k,oo)) print(limit(OI_vti(k),k,oo)) print(limit(OI_tti(k),k,oo)) print(limit(OI_acoums(3,s),s,oo)) print(limit(OI_vtims(3,s),s,oo)) print(limit(OI_ttims(3,s),s,oo)) print(limit(OI_acous(k),k,oo)) print(limit(OI_vtis(k),k,oo)) print(limit(OI_ttis(k),k,oo)) kk=[3,5,7,9,11,13,15,17,19,21,23,25,27,29,31] ss=[2,4,8,16,32,64] OI_wave=np.zeros((15,6)) OI_wavems=np.zeros((15,6,3)) OI=np.zeros((15,3)) for i in range(0,15): OI_wave[i,0]=OI_acou(kk[i]) OI_wave[i,1]=OI_vti(kk[i]) OI_wave[i,2]=OI_tti(kk[i]) OI_wave[i,3]=OI_acous(kk[i]) OI_wave[i,4]=OI_vtis(kk[i]) OI_wave[i,5]=OI_ttis(kk[i]) OI[i,0]=OI_dxi(kk[i]) OI[i,1]=OI_dxxi(kk[i]) OI[i,2]=OI_dxxij(kk[i]) for j in range(0,6): OI_wavems[i,j,0]=OI_acoums(kk[i],ss[j]) OI_wavems[i,j,1]=OI_vtims(kk[i],ss[j]) OI_wavems[i,j,2]=OI_ttims(kk[i],ss[j]) import matplotlib.pyplot as plt fig = plt.figure() plt.hold("off") acou = plt.plot(OI_wave[:,0],label='acou') # this is how you'd plot a single line... vti = plt.plot(OI_wave[:,1],label='vti') # this is how you'd plot a single line... tti = plt.plot(OI_wave[:,2],label='tti') # this is how you'd plot a single line... fig = plt.figure() plt.hold("off") acou = plt.plot(OI_wave[:,3],label='acous') # this is how you'd plot a single line... vti = plt.plot(OI_wave[:,4],label='vtis') # this is how you'd plot a single line... tti = plt.plot(OI_wave[:,5],label='ttis') # this is how you'd plot a single line... fig = plt.figure() plt.hold("off") acou = plt.plot(OI_wavems[:,2,0],label='acous') # this is how you'd plot a single line... vti = plt.plot(OI_wavems[:,2,1],label='vtis') # this is how you'd plot a single line... tti = plt.plot(OI_wavems[:,2,2],label='ttis') # this is how you'd plot a single line... fig = plt.figure() plt.hold("off") acou = plt.plot(OI_wavems[:,5,0],label='acous') # this is how you'd plot a single line... vti = plt.plot(OI_wavems[:,5,1],label='vtis') # this is how you'd plot a single line... tti = plt.plot(OI_wavems[:,5,2],label='ttis') # this is how you'd plot a single line... plt.show() ```
# Event Sampling

## Prerequisites

To understand how to generate a Model and a MapDataset, and how to fit the data, please refer to the `~gammapy.modeling.models.SkyModel` documentation and the [simulate_3d](simulate_3d.ipynb) tutorial.

## Context

This tutorial describes how to sample events from an observation of one (or more) gamma-ray source(s). The main aim of the tutorial is to set up the minimal configuration needed to use the Gammapy event sampler and to obtain an output photon event list.

The core of the event sampling lies in the Gammapy `~gammapy.datasets.MapDatasetEventSampler` class, which is based on the inverse cumulative distribution function [(Inverse CDF)](https://en.wikipedia.org/wiki/Cumulative_distribution_function#Inverse_distribution_function_(quantile_function)).

The `~gammapy.datasets.MapDatasetEventSampler` takes as input a `~gammapy.datasets.Dataset` object containing the spectral, spatial and temporal properties of the source(s) of interest.

The `~gammapy.datasets.MapDatasetEventSampler` class evaluates the map of predicted counts (`npred`) per bin of the given Sky model, and the `npred` map is then used to sample the events. In particular, the output of the event sampler is a set of events carrying their true coordinates, true energies and times of arrival. IRF corrections (i.e. PSF and energy dispersion) can then also be applied to these events in order to obtain reconstructed coordinates and energies. At the end of this process, you obtain an event list in FITS format.

## Objective

Describe the process of sampling events from a given Sky model and obtaining an output event list.

## Proposed approach

In this section, we show how to define a `gammapy.data.Observations` and to create a `~gammapy.datasets.Dataset` object (for more info on `~gammapy.datasets.Dataset` objects, please visit this [link](analysis_2.ipynb#Preparing-reduced-datasets-geometry)). These are both necessary for the event sampling. Then, we define the Sky model from which we sample events.

In this tutorial, we propose two examples for sampling events: one choosing a point-like source and one using a template map.

## Setup

As usual, let's start with some general imports...

```
%matplotlib inline
import matplotlib.pyplot as plt
from pathlib import Path
import numpy as np
import copy
import astropy.units as u
from astropy.io import fits
from astropy.coordinates import SkyCoord
from gammapy.data import DataStore, GTI, Observation
from gammapy.datasets import MapDataset, MapDatasetEventSampler
from gammapy.maps import MapAxis, WcsGeom, Map
from gammapy.irf import load_cta_irfs
from gammapy.makers import MapDatasetMaker
from gammapy.modeling import Fit
from gammapy.modeling.models import (
    Model,
    Models,
    SkyModel,
    PowerLawSpectralModel,
    PowerLawNormSpectralModel,
    PointSpatialModel,
    GaussianSpatialModel,
    TemplateSpatialModel,
    FoVBackgroundModel,
)
from regions import CircleSkyRegion
```

### Define an Observation

You first create a `gammapy.data.Observations` object that contains the pointing position, the GTIs and the IRFs you want to consider. Hereafter, we choose the IRF of the South configuration used for the CTA DC1 and we set the pointing position of the simulated field at the Galactic Center. We also fix the exposure time to 1 hr.
Let's start with some initial settings:

```
filename = (
    "$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits"
)
pointing = SkyCoord(0.0, 0.0, frame="galactic", unit="deg")
livetime = 1 * u.hr
```

Now you can create the observation:

```
irfs = load_cta_irfs(filename)
observation = Observation.create(
    obs_id=1001, pointing=pointing, livetime=livetime, irfs=irfs
)
```

### Define the MapDataset

Let's generate the `~gammapy.datasets.Dataset` object: we define the energy axes (true and reconstructed), the migration axis and the geometry of the observation.

*This is a crucial point for the correct configuration of the event sampler. Indeed, the spatial and energy binning should be treated carefully and... the finer the better. For this reason, we suggest defining the energy axes with at least 10-20 bins per decade for all the sources of interest. The spatial binning may instead be different from source to source and, at first order, a binning significantly smaller than the expected source size should be adopted.*

For the examples shown hereafter, we set the geometry of the dataset to a field of view of 2 deg x 2 deg and we bin the spatial map with pixels of 0.02 deg.

```
energy_axis = MapAxis.from_energy_bounds(
    "0.1 TeV", "100 TeV", nbin=10, per_decade=True
)
energy_axis_true = MapAxis.from_energy_bounds(
    "0.03 TeV", "300 TeV", nbin=20, per_decade=True, name="energy_true"
)
migra_axis = MapAxis.from_bounds(
    0.5, 2, nbin=150, node_type="edges", name="migra"
)

geom = WcsGeom.create(
    skydir=pointing,
    width=(2, 2),
    binsz=0.02,
    frame="galactic",
    axes=[energy_axis],
)
```

In the following, the dataset is created by selecting the effective area, the background model, the PSF and the Edisp from the IRF. The dataset thus produced can be saved into a FITS file just by using the `write()` function. We put it into the `event_sampling` sub-folder:

```
%%time
empty = MapDataset.create(
    geom,
    energy_axis_true=energy_axis_true,
    migra_axis=migra_axis,
    name="my-dataset",
)
maker = MapDatasetMaker(selection=["exposure", "background", "psf", "edisp"])
dataset = maker.run(empty, observation)

Path("event_sampling").mkdir(exist_ok=True)
dataset.write("./event_sampling/dataset.fits", overwrite=True)
```

### Define the Sky model: a point-like source

Now let's define a Sky model (see how to create it [here](models.ipynb)) for a point-like source centered 0.5 deg away from the Galactic Center and with a power-law spectrum. We then save the model into a yaml file.

```
spectral_model_pwl = PowerLawSpectralModel(
    index=2, amplitude="1e-12 TeV-1 cm-2 s-1", reference="1 TeV"
)
spatial_model_point = PointSpatialModel(
    lon_0="0 deg", lat_0="0.5 deg", frame="galactic"
)

sky_model_pntpwl = SkyModel(
    spectral_model=spectral_model_pwl,
    spatial_model=spatial_model_point,
    name="point-pwl",
)

bkg_model = FoVBackgroundModel(dataset_name="my-dataset")

models = Models([sky_model_pntpwl, bkg_model])

file_model = "./event_sampling/point-pwl.yaml"
models.write(file_model, overwrite=True)
```

### Sampling the source and background events

Now we can finally add the `~gammapy.modeling.models.SkyModel` we want to event-sample to the `~gammapy.datasets.Dataset` container:

```
dataset.models = models
print(dataset.models)
```

The next step shows how to sample the events with the `~gammapy.datasets.MapDatasetEventSampler` class. The class requires a random number seed (that we set with `random_state=0`), the `~gammapy.datasets.Dataset` and the `gammapy.data.Observations` object.
From the latter, the `~gammapy.datasets.MapDatasetEventSampler` class takes all the metadata information.

```
%%time
sampler = MapDatasetEventSampler(random_state=0)
events = sampler.run(dataset, observation)
```

The output of the event sampler is an event list with coordinates, energies and times of arrival of the source and background events. Source and background events are flagged by the MC_ID identifier (where 0 is the default identifier for the background).

```
print(f"Source events: {(events.table['MC_ID'] == 1).sum()}")
print(f"Background events: {(events.table['MC_ID'] == 0).sum()}")
```

We can inspect the properties of the simulated events as follows:

```
events.select_offset([0, 1] * u.deg).peek()
```

By default, the `~gammapy.datasets.MapDatasetEventSampler` fills the metadata keyword `OBJECT` in the event list using the first model of the SkyModel object. You can change it with the following commands:

```
events.table.meta["OBJECT"] = dataset.models[0].name
```

Let's write the event list and its GTI extension to a FITS file. We make use of the `fits` library in `astropy`:

```
primary_hdu = fits.PrimaryHDU()
hdu_evt = fits.BinTableHDU(events.table)
hdu_gti = fits.BinTableHDU(dataset.gti.table, name="GTI")
hdu_all = fits.HDUList([primary_hdu, hdu_evt, hdu_gti])
hdu_all.writeto("./event_sampling/events_0001.fits", overwrite=True)
```

#### Generate a skymap

A skymap of the simulated events can be obtained with:

```
counts = Map.create(
    frame="galactic", skydir=(0, 0.0), binsz=0.02, npix=(100, 100)
)

counts.fill_events(events)
counts.plot(add_cbar=True);
```

#### Fit the simulated data

We can now check the outcome of the event sampling by fitting the data (a tutorial on source fitting is [here](analysis_2.ipynb#Fit-the-model) and [here](simulate_3d.ipynb)). We make use of the same `~gammapy.modeling.models.Models` adopted for the simulation. Hence, we first read the `~gammapy.datasets.Dataset` and the model file, and we fill the `~gammapy.datasets.Dataset` with the sampled events.

```
models_fit = Models.read("./event_sampling/point-pwl.yaml")

counts = Map.from_geom(geom)
counts.fill_events(events)

dataset.counts = counts
dataset.models = models_fit
```

Let's fit the data and look at the results:

```
%%time
fit = Fit([dataset])
result = fit.run(optimize_opts={"print_level": 1})
print(result)

result.parameters.to_table()
```

The results look great!

## Extended source using a template

The event sampler can also work with a template model. Here we use the interstellar emission model map of the Fermi 3FHL, which can be found in the GAMMAPY data repository.

We proceed following the same steps shown above and finally have a look at the events' properties:

```
template_model = TemplateSpatialModel.read(
    "$GAMMAPY_DATA/fermi-3fhl-gc/gll_iem_v06_gc.fits.gz", normalize=False
)
# we make the model brighter artificially so that it becomes visible over the background
diffuse = SkyModel(
    spectral_model=PowerLawNormSpectralModel(norm=5),
    spatial_model=template_model,
    name="template-model",
)

bkg_model = FoVBackgroundModel(dataset_name="my-dataset")

models_diffuse = Models([diffuse, bkg_model])

file_model = "./event_sampling/diffuse.yaml"
models_diffuse.write(file_model, overwrite=True)

dataset.models = models_diffuse
print(dataset.models)

%%time
sampler = MapDatasetEventSampler(random_state=0)
events = sampler.run(dataset, observation)

events.select_offset([0, 1] * u.deg).peek()
```

### Simulate multiple event lists

In some use cases, you may want to sample events from a number of observations.
In this section, we show how to simulate a set of event lists. For simplicity, we consider only one point-like source, observed three times for 1 hr each and with the same pointing position.

Let's first define the start time and the livetime of each observation:

```
tstarts = [1, 5, 7] * u.hr
livetimes = [1, 1, 1] * u.hr

%%time
for idx, tstart in enumerate(tstarts):

    observation = Observation.create(
        obs_id=idx,
        pointing=pointing,
        tstart=tstart,
        livetime=livetimes[idx],
        irfs=irfs,
    )

    dataset = maker.run(empty, observation)
    dataset.models = models

    sampler = MapDatasetEventSampler(random_state=idx)
    events = sampler.run(dataset, observation)
    events.table.write(
        f"./event_sampling/events_{idx:04d}.fits", overwrite=True
    )
```

You can now load the event lists with `DataStore.from_events_files()` and run your own analysis following the instructions in the [`analysis_2`](analysis_2.ipynb) tutorial.

```
path = Path("./event_sampling/")
paths = list(path.rglob("events*.fits"))
data_store = DataStore.from_events_files(paths)
data_store.obs_table
```

<!-- ## Read simulated event lists with Datastore.from_events_lists
Here we show how to simulate a set of event lists of the same Sky model, but with different GTIs. We make use of the settings we applied previously.
Let's first define the GTI, choosing a start time and a duration of the observation: -->

## Exercises

- Try to sample events for an extended source (e.g. a radial gaussian morphology); a possible starting point is sketched below;
- Change the spatial model and the spectrum of the simulated Sky model;
- Include a temporal model in the simulation
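As a starting point for the first exercise, here is a possible sketch (not part of the original tutorial) that swaps the point source for a radially symmetric Gaussian, reusing the spectral model, dataset and observation defined above. The 0.3 deg width is an arbitrary choice:

```
# Hypothetical sketch for the first exercise: a radial Gaussian source.
spatial_model_gauss = GaussianSpatialModel(
    lon_0="0 deg", lat_0="0.5 deg", sigma="0.3 deg", frame="galactic"
)
sky_model_gauss = SkyModel(
    spectral_model=spectral_model_pwl,
    spatial_model=spatial_model_gauss,
    name="gauss-pwl",
)

dataset.models = Models(
    [sky_model_gauss, FoVBackgroundModel(dataset_name="my-dataset")]
)
events_gauss = MapDatasetEventSampler(random_state=0).run(dataset, observation)
```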
# Now You Code 3: Shopping Cart

In this program you will implement an online shopping cart using a Python list of dictionaries. Each dictionary will contain the product name, price and quantity.

The program should loop continually, asking the user to enter
- Product name
- Product price
- Product quantity

until the user enters a product name of `'checkout'`, at which time the loop should break.

Each time through the loop you should create a dictionary of product name, product price and product quantity, then add the dictionary to a list.

After you enter `'checkout'` the program should show:
- all the items in the cart, including their quantity and price
- and the total amount of the order, a running sum of quantity times price

NOTE: Don't worry about handling bad inputs for this exercise.

Example Run:

```
E-Commerce Shopping Cart
Enter product name or 'checkout':pencil
Enter pencil Price:0.99
Enter pencil Quantity:10
Enter product name or 'checkout':calculator
Enter calculator Price:9.99
Enter calculator Quantity:1
Enter product name or 'checkout':checkout
pencil 10 $0.99
calculator 1 $9.99
TOTAL: $19.89
```

Start out your program by writing your TODO list of steps you'll need to solve the problem!

## Step 1: Problem Analysis

Inputs:

Outputs:

Algorithm (Steps in Program):

```
write algorithm here
```

```
# STEP 2: Write code
print("E-Commerce Shopping Cart")
cart = []
name = 'o'
total = 0
while name != 'checkout':
    name = input('Enter Product name or "checkout": ')
    if name != 'checkout':
        price = float(input("Enter %s Price: " % (name)))
        quantity = int(input("Enter %s Quantity: " % (name)))
        cart.append(name)
        cart.append(quantity)
        cart.append(price)
        total = total + price*quantity
print(cart)
print("Total: $%.2f" % (total))
```

## Step 3: Questions

1. How does using a Python dictionary simplify this program? (Think of how you would have to write this program if you did not use a dictionary.)

A dictionary can hold values for each specific variable.

2. What happens when you run the program and just type `checkout`? Does the program work as you would expect?

It prints out empty values and then prints a blank total.

## Reminder of Evaluation Criteria

1. Was the problem attempted (analysis, code, and answered questions)?
2. Was the problem analysis thought out? (does the program match the plan?)
3. Does the code execute without syntax error?
4. Does the code solve the intended problem?
5. Is the code well written? (easy to understand, modular, and self-documenting, handles errors)
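Since the prompt asks for a list of dictionaries, here is one possible sketch of that approach (an illustration, not the submitted answer above), keeping the same input and output format as the example run:

```
# Possible dictionary-based variant of the cart (illustrative sketch only).
print("E-Commerce Shopping Cart")
cart = []
while True:
    name = input("Enter product name or 'checkout':")
    if name == 'checkout':
        break
    price = float(input("Enter %s Price:" % name))
    quantity = int(input("Enter %s Quantity:" % name))
    # one dictionary per product, appended to the cart list
    cart.append({'name': name, 'price': price, 'quantity': quantity})

total = 0
for item in cart:
    print("%s %d $%.2f" % (item['name'], item['quantity'], item['price']))
    total += item['price'] * item['quantity']
print("TOTAL: $%.2f" % total)
```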
# Graph-tool tutorial The following tutorial demonstrates working with graphs using the [graph-tool python module](https://graph-tool.skewed.de/). In the process, you will learn how to: * create a graph 'by-hand' * perform basic network analysis * visualize graphs and their properties ``` import graph_tool.all as gt import pandas as pd import numpy as np from IPython.display import display %matplotlib inline print("graph-tool version: {}".format(gt.__version__.split(' ')[0])) ``` # Show datasets in collection ``` with pd.option_context('display.max_colwidth', -1): display(pd.DataFrame.from_records(gt.collection.descriptions, index=['description']).transpose()) g = gt.collection.data['karate'] g # construct a simple drawing of this graph ``` # Another graph example ``` # If you run this for the first time, download the data with the command below #!wget https://git.skewed.de/count0/graph-tool/raw/2c8c9899dd05549eaef728dabd93dc0759a2d4e0/doc/search_example.xml gs = gt.load_graph("search_example.xml") # TODO: print available edge and vertex properties # TODO: visualize edge weight and names ``` # Social graph drawing 101 ``` X_knows = { 'Mary': ['Peter', 'Albert', 'DavidF', 'Peter'], 'Judy': ['Bob', 'Alan'], 'Peter': ['Mary', 'DavidF', 'Jon'], 'DavidF': ['Albert', 'Joseph', 'Peter', 'Mary'], 'Jon': ['Peter', 'Joseph', 'DavidE'], 'DavidE': ['Jon', 'Joseph', 'Albert'], 'Joseph': ['DavidE', 'Jon', 'DavidF'], 'Bob': ['Judy', 'Alan'], 'Alan': ['Bob', 'Mary', 'Judy'], 'Albert': ['DavidF', 'Mary', 'DavidE'], } g = gt.Graph(directed=True) ``` # Create a graph using Python iterations Below is a slow and tedious version of what can be done with a single call to `add_edge_list(...)` on a `Graph`. ``` # Create edge tuples and list of unique names X_edges = list((n,k) for n in X_knows for k in X_knows[n]) from functools import reduce X_names = reduce(lambda a,b: set(a).union(b), (X_knows[n] for n in X_knows) ).union(X_knows.keys()) X_names = list(X_names) # Construct a 'StringIndexer' to convert strings to integers from sklearn import preprocessing le = preprocessing.LabelEncoder() lem = le.fit(list(X_names)) X_edges = list(map(lem.transform, X_edges)) # Create Graph object and add a string property for names g2 = gt.Graph() v_name = g2.new_vertex_property('string') g2.vertex_properties['name'] = v_name for vn in lem.classes_: v = g2.add_vertex() v_name[v] = vn for f,t in X_edges: g2.add_edge(f,t) # TODO: Same as above, make a tidy, undirectional drawing of this graph ``` # Fast graph construction ``` # TODO: find one-line call to g.add_edge_list that constructs the X_knows graph # hint: use nested list comprehension to reshape the dictionary # TODO: Create an undirected view of this graph # Tidy up parallel edges # Try two different layouts presented in the tutorial # Produce a tidy drawing of the undirected graph ``` # Graph analysis Work through the [graph filtering examples](https://graph-tool.skewed.de/static/doc/quickstart.html#graph-views) to draw a view of a relevant graph measure, such as betweenness. Use one of the graphs constructed above.
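One possible answer to the fast-construction TODO above (a sketch, not the only solution): `Graph.add_edge_list` accepts string vertex identifiers when called with `hashed=True`, so the whole `X_knows` dictionary can be loaded in one call via a nested comprehension, and the returned property map holds the names:

```
# Sketch: build the graph in one call; with hashed=True the string names are
# mapped to vertices automatically and returned as a vertex property map.
g3 = gt.Graph(directed=True)
g3.vertex_properties['name'] = g3.add_edge_list(
    [(person, friend) for person in X_knows for friend in X_knows[person]],
    hashed=True,
)

# Make the graph undirected and tidy up parallel edges before drawing.
g3.set_directed(False)
gt.remove_parallel_edges(g3)
```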
``` import numpy as np import pandas as pd import re import pdb from matplotlib import pyplot as plt from scipy import stats as st %matplotlib inline gifts=pd.read_csv('gifts.csv') gifts.shape gifts=gifts.assign(gift=gifts.GiftId.apply(lambda x: x.split('_')[0]).apply(lambda x: re.sub("s$","",x))) set(gifts.gift) gifts.groupby('gift').count() def pound_weighted_gifts(): weights = {'horse' : max(0, np.random.normal(5,2,1)[0]), 'ball' : max(0, 1 + np.random.normal(1,0.3,1)[0]), 'bike' : max(0, np.random.normal(20,10,1)[0]), 'train' : max(0, np.random.normal(10,5,1)[0]), 'coal' : 47 * np.random.beta(0.5,0.5,1)[0], 'book' : np.random.chisquare(2,1)[0], 'doll' : np.random.gamma(5,1,1)[0], 'block' : np.random.triangular(5,10,20,1)[0], 'gloves' : 3.0 + np.random.rand(1)[0] if np.random.rand(1) < 0.3 else np.random.rand(1)[0]} weights = pd.DataFrame(weights,index=['lbs']).T adjcolnames=pd.DataFrame(weights.index.tolist(),index=weights.index,columns=['colnames']).colnames.apply(lambda x: re.sub("s$","",x)) weights.index=adjcolnames return weights def pound_weighted_gifts(gifttype): weights = {'horse' : max(0, np.random.normal(5,2,1)[0]), 'ball' : max(0, 1 + np.random.normal(1,0.3,1)[0]), 'bike' : max(0, np.random.normal(20,10,1)[0]), 'train' : max(0, np.random.normal(10,5,1)[0]), 'coal' : 47 * np.random.beta(0.5,0.5,1)[0], 'book' : np.random.chisquare(2,1)[0], 'doll' : np.random.gamma(5,1,1)[0], 'block' : np.random.triangular(5,10,20,1)[0], 'glove' : 3.0 + np.random.rand(1)[0] if np.random.rand(1) < 0.3 else np.random.rand(1)[0]} return weights[gifttype] weights=gifts.gift.apply(pound_weighted_gifts) weight_dist=pound_weighted_gifts() for i in range(100): weight_dist=weight_dist.append(pound_weighted_gifts()) len(set(weight_dist.index.values)) fig_size = plt.rcParams["figure.figsize"] fig_size[0] = 6 fig_size[1] = 4 def plot_weights(x,bins): fig, ax1 = plt.subplots() ax1.set_ylabel('freq') ax1.hist(np.ravel(x),bins=bins,histtype='step') x.sort() ax2 = ax1.twinx() ax2.set_ylabel('prob') ax2.plot(np.ravel(x),st.norm.pdf(np.ravel(x),loc=np.mean(x),scale=np.std(x)),'--k') plt.show() plot_weights(weight_dist.loc['coal',].iloc[:,0].tolist(),10) j=1 for i in set(weight_dist.index.values): plt.subplot(3,3,j) plt.hist(weight_dist.loc[i,:].values,bins=10,histtype='step') plt.title(i) j+=1 plt.show() ``` ## a bag ``` class bag: def __init__(self,gifts=None): self.gifts, self.num, self.lbs, self.weight = [], 0, [], 0 if gifts is not None: self.gifts=gifts.GiftId.tolist() self.lbs=gifts.lbs.tolist() self.number() self.weigh() def empty(self): self.gifts.pop() self.lbs.pop() self.number() self.weigh() def fill(self,gifts): self.gifts.append(gifts.GiftId) self.lbs.append(gifts.lbs) self.number() self.weigh() def number(self): self.num=len(self.gifts) def weigh(self): self.weight=np.sum(self.lbs) ``` ## a consignment of bags ``` class consignment: def __init__(self, bags=None): self.inventory, self.items, self.bag_items, self.num, self.weight = [], [], [], 0, 0 if bags is not None: for bag1 in bags: self.addbag(bag1) def addbag(self,bag1): if (bag1.weight<=50) and (bag1.num>=3): self.inventory.append(bag1) if len(self.inventory)>1000: lbs=[bag1.weight for bag1 in self.inventory] discount=lbs.index(np.min(lbs)) del self.inventory[discount] #lbs.sort(reverse=True) #lbs=lbs[0:999] #self.inventory=[bag1 for bag1 in self.inventory if bag1.weight in lbs] self.manifest() def manifest(self): self.items, self.bag_items, self.num, self.weight = [], [], 0, 0 for bag1 in self.inventory: self.items.extend(bag1.gifts) 
self.bag_items.append(" ".join([j for j in bag1.gifts])) self.num+=bag1.num self.weight+=bag1.weight ``` ## creating a consignment #### fill all bags ``` def bagallbags(gifts): picked, con1 = [], consignment() for i in range(1000): bag1, j = bag(), 0 while (not(np.all(gifts.index.isin(picked)))) and (j<10): pick=np.random.choice(gifts[~gifts.index.isin(picked)].index.tolist(),size=1)[0] picked.append(pick) bag1.fill(gifts.iloc[pick]) if bag1.weight>50: bag1.empty() picked.pop() if bag1.num>=3: break j+=1 con1.addbag(bag1) return con1 ``` #### bag all gifts ``` def bagallgifts(gifts): i, picked, con1 = 0, [], consignment() for i in range(gifts.shape[0]): bag1, j = bag(), 0 while (not(np.all(gifts.index.isin(picked)))) and (j<10): pick=np.random.choice(gifts[~gifts.index.isin(picked)].index.values,size=1)[0] picked.append(pick) bag1.fill(gifts.iloc[pick]) if bag1.weight>50: bag1.empty() picked.pop() if bag1.num>=3: break j+=1 con1.addbag(bag1) if len(picked)==gifts.shape[0]: break return con1 ``` #### bag all gifts adjusting bag weight with Standard Error of Mean ``` def varadjbagallgifts(gifts,adj): picked, con1 = [], consignment() while (not(np.all(gifts.GiftId.isin(picked)))): #pdb.set_trace() bag1, j = bag(), 0 while (not(np.all(gifts.GiftId.isin(picked)))) and (j<10): pick=np.random.choice(gifts[~gifts.GiftId.isin(picked)].index.values,size=1)[0] picked.append(gifts.GiftId.iloc[pick]) bag1.fill(gifts.iloc[pick]) mu=np.mean(bag1.lbs) SEM=np.std(bag1.lbs)/np.sqrt(bag1.num) varadjbagweight=(mu+adj*SEM)*bag1.num if bag1.num>3 and varadjbagweight>50: bag1.empty() picked.pop() j+=1 con1.addbag(bag1) return con1 ``` #### bag similar gifts adjusting bag weight with Standard Error of Mean #### testing all gifts ``` #gifts2=gifts.assign(lbs=gifts.merge(pound_weighted_gifts(),left_on='gift',right_index=True).loc[:,'lbs']) gifts2=gifts.assign(lbs=weights) con0 = varadjbagallgifts(gifts2,2) len(con0.inventory), con0.num, con0.weight ``` #### testing similar gifts #### list of consigments ``` conlist = gifts2.groupby('gift').apply(lambda x: varadjbagallgifts(x.reset_index(drop=True),2)) ``` #### bagged gifts ``` allpicked=[] [allpicked.extend(i.items) for i in conlist.tolist()] len(allpicked) ``` #### creating a big consignment ``` con0=consignment() [con0.addbag(j) for i in conlist.tolist() for j in i.inventory] len(con0.inventory), con0.num, con0.weight ``` #### creating a big inventory ``` inventory=con0.inventory ``` #### baging unpicked ``` con0=consignment() con1=varadjbagallgifts(gifts2[~gifts2.GiftId.isin(allpicked)].reset_index(drop=True),2) ``` #### adding to inventory ``` inventory.extend(con1.inventory) [con0.addbag(i) for i in inventory] len(con0.inventory), con0.num, con0.weight plot_weights([con0.inventory[i].weight for i in range(len(con0.inventory))],10) pd.DataFrame(con0.bag_items,columns=['Gifts']).to_csv('submission2_0110.csv',index=False) ``` ## Natural selection ``` #weights=pound_weighted_gifts() def selection(cutoff,adj,gifts=gifts,weights=weights): #gifts2=gifts.assign(lbs=gifts.merge(weights,left_on='gift',right_index=True).loc[:,'lbs']) gifts2=gifts.assign(lbs=weights) con1 = consignment() for j in range(100): if (len(con1.inventory)<1000) and (con1.num<gifts.shape[0]): #con0=bagallgifts(gifts2) con0=varadjbagallgifts(gifts2,adj) [con1.addbag(con0.inventory[i]) for i in range(len(con0.inventory)) if con0.inventory[i].weight>=cutoff] print(con1.weight) gifts2=gifts2[~gifts2.GiftId.isin(con1.items)].reset_index(drop=True) return(con1) con1=selection(40,2) len(con1.inventory), 
con1.num, con1.weight pd.DataFrame(con1.bag_items,columns=['Gifts']).to_csv('submission12_3009.csv',index=False) ``` ## Simulated Annealing ``` def anneal(T1,cutoff,rigor,gifts=gifts): gifts=pounds(gifts) con0=baggingallbags(gifts) retries=[] for dT in range(T1): T2=T1-dT cutoff+=dT*rigor/T1 con1=baggingallbags(gifts) delta=con1.weight-con0.weight p=np.exp(delta/T2) if (delta>0) or (p>cutoff): con0=con1 else: retries.insert(0,dT) if len(retries)>10: if np.sum([retries[i]-retries[i+1] for i in range(10)])==10: print("converged at:",con0.weight) break return con0 con=anneal(T1=1000,cutoff=0.1,rigor=0.8) def anneal(steps,pcutoff,rigor,wcutoff,gifts=gifts,weights=weights,T1=50): #N=gifts.merge(weights,left_on='gift',right_index=True).loc[:,'lbs'].sum() N=weights.sum() con0=selection(wcutoff,gifts,weights) retries=[] for dT in np.linspace(wcutoff,T1,steps): T2=T1-dT pcutoff+=dT*rigor/T1 con1=selection(dT,gifts,weights) delfitness=(con1.weight-con0.weight)/N p=np.exp(delfitness/T2) if (delfitness>0) or (p>pcutoff): con0=con1 else: retries.insert(0,dT) if len(retries)>10: if np.sum([retries[i]-retries[i+1] for i in range(10)])==10*step: print("converged at:",con0.weight) break return con0 con=anneal(steps=25,pcutoff=0.5,rigor=0.5,wcutoff=40) len(con.inventory), con.num, con.weight pd.DataFrame(con.bag_items,columns=['Gifts']).to_csv('submission2_2909.csv',index=False) ``` ## Sampling distribution of gifts ``` def samplegifts(size,gifts=gifts,weights=weights): inventory, picked = [], [] #gifts2=gifts.assign(lbs=gifts.merge(weights,left_on='gift',right_index=True).loc[:,'lbs']) gifts2=gifts.assign(lbs=weights) while np.sum(~gifts2.index.isin(picked))>=size: pick=np.random.choice(gifts2[~gifts2.index.isin(picked)].index.values,size=size) inventory.append(gifts2.loc[gifts2.index.isin(pick),'lbs'].mean()) picked.extend(pick) return inventory inventory=samplegifts(3) np.std(inventory) plot_weights(inventory,10) ``` ## Cluster weights (k < len(set(gifts.gift))) ## weight Baised choice
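This last section was left as a stub; a possible starting point (an assumption about its intent, not code from the original notebook) is to pick gifts with probability inversely proportional to their expected weight, so lighter gifts are favoured when a bag is nearly full:

```
# Hypothetical sketch for a weight-biased gift choice; the inverse-weight
# probability below is an assumption about what this section intended.
def weight_biased_pick(gifts2, picked, size=1):
    pool = gifts2[~gifts2.GiftId.isin(picked)]
    p = 1.0 / (pool.lbs + 0.1)   # favour lighter gifts; 0.1 avoids division by zero
    p = p / p.sum()              # normalise to a probability distribution
    return np.random.choice(pool.index.values, size=size, replace=False, p=p)
```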
## HIV news articles extraction from The Kabul Times

Data extraction of the following parameters:
- Headline
- Description
- Author
- Published Date
- Category
- Publication
- News
- URL
- Keywords
- Summary

### Importing the necessary libraries

```
from newspaper import Article    # article scraping & curation
from bs4 import BeautifulSoup    # Python library for pulling data out of HTML and XML files
from requests import get        # standard for making HTTP requests in Python
import pandas as pd             # library written for data manipulation and analysis
import sys, time                # system-specific parameters and functions
```

### Creating empty lists for the HIV news article parameters to be extracted

```
headlines, descriptions, dates, authors, news, keywords, summaries, urls, category, publication = [], [], [], [], [], [], [], [], [], []
```

### Finding the total number of pages from the total number of articles in the search results

```
url = 'https://thekabultimes.gov.af/?s=HIV'
soup = BeautifulSoup(get(url).text, 'lxml')
tokens = [soup.select('.page-numbers')[i].text for i in range(len(soup.select('.page-numbers')))]
max_pages = [token for token in tokens if token.isdigit()]
```

### Iterating over max_pages. Scraping the article URLs

```
for page in max_pages:
    try:
        url = 'https://thekabultimes.gov.af/page/' + page + '/?s=HIV'
        soup = BeautifulSoup(get(url).text, 'lxml')

        # Extracts the Headlines
        try:
            headline = [soup.select('h2.entry-title')[i].text for i in range(len(soup.select('h2.entry-title')))]
            headlines.extend(headline)
        except:
            headlines.extend(None)

        # Extracts the Authors
        try:
            author = [soup.select('.entry-meta')[i].select_one('.author').text for i in range(len(soup.select('.entry-meta')))]
            authors.extend(author)
        except:
            authors.extend(None)

        # Extracts the published dates
        try:
            pub_date = [soup.select('.entry-meta')[i].select_one('.entry-date').text for i in range(len(soup.select('.entry-meta')))]
            dates.extend(pub_date)
        except:
            dates.extend(None)

        # Extracts the news category
        try:
            cat = [soup.select('.penci-cat-links')[i].text.split() for i in range(len(soup.select('.penci-cat-links')))]
            category.extend(cat)
        except:
            category.extend(None)

        # Extracts URL's
        for i in range(len(soup.select('h2.entry-title'))):
            urls.append(soup.select('h2.entry-title')[i].a['href'])
    except:
        pass

    sys.stdout.write('\r' + str(page) + '\r')
    sys.stdout.flush()
```

### Removing duplicate URL entries from the list by executing the line below

```
urls = list(dict.fromkeys(urls))
print("Total Extracted URL's are" + ' : ' + str(len(urls)), type(urls))
```

### Iterating over the URLs in a for loop. Scraping the articles with the above parameters

```
%%time
for index, url in enumerate(urls):
    try:
        # Download and parse the article with newspaper
        article = Article(url)
        article.download()
        article.parse()
        article.nlp()

        # Extracts the Descriptions
        try:
            descriptions.append(article.meta_description.strip())
        except:
            descriptions.append(None)

        # Extracts the news articles
        try:
            news.append(' '.join(article.text.split()).replace("\'\'", " ").replace("\'", "").replace(" / ", ""))
        except:
            news.append(None)

        # Extracts Keywords and Summaries
        try:
            keywords.append(article.keywords)
            summaries.append(' '.join(article.summary.split()))
        except:
            keywords.append(None)
            summaries.append(None)

        # Extracts the news publication
        try:
            publication.append(article.meta_data['og']['site_name'])
        except:
            publication.append(None)
    except:
        descriptions.append(None)
        news.append(None)
        keywords.append(None)
        publication.append(None)
        summaries.append(None)

    sys.stdout.write('\r' + str(index) + ' : ' + str(url) + '\r')
    sys.stdout.flush()
```

### Checking the array length of each list before creating the DataFrame

```
print(len(headlines), len(descriptions), len(authors), len(dates), len(category),
      len(publication), len(news), len(keywords), len(summaries), len(urls))
```

### Creating a csv file after checking the array lengths and dropping the missing values from the dataset

```
if len(headlines) == len(descriptions) == len(authors) == len(dates) == len(news) == len(publication) == len(keywords) == len(summaries) == len(urls) == len(category):
    tbl = pd.DataFrame({'Headlines' : headlines,
                        'Descriptions' : descriptions,
                        'Authors' : authors,
                        'Published_Dates' : dates,
                        'Publication' : publication,
                        'Articles' : news,
                        'category' : category,
                        'Keywords' : keywords,
                        'Summaries' : summaries,
                        'Source_URLs' : urls})
    tbl = tbl.dropna()
    path = 'D:\\#Backups\\Desktop\\!Code!\\CDRI\\HIV\\Data Extraction\\#Datasets\\'
    tbl.to_csv(path+'The_Kabul_Times.csv', index=False)
else:
    print('Array length does not match!')

tbl.head()

tbl.shape
```
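One small addition worth considering (a sketch, not part of the original notebook): the `time` module is imported above but never used; a short pause between requests keeps the scraper polite to the server.

```
import time

def polite_get(url, delay=1.0):
    """Fetch a page, then pause so consecutive requests don't hammer the server."""
    response = get(url)   # `get` is requests.get, imported at the top of the notebook
    time.sleep(delay)     # arbitrary one-second default; tune as needed
    return response
```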
[Image1]: ./Images/Goldstein-Cap1-Exercise-21.png "Problem diagram" # Exercise 1.21 - Goldstein ![Problem diagram][Image1] ### Equations of motion $$ - T_{A} = m_{A} ( \; \ddot{r} - r \; \dot{\theta}^{2} \;)$$ $$ 0 = m_{A} ( \; r \; \ddot{\theta} + 2 \dot{r} \; \dot{\theta} \; )$$ $$ m_{B} \; g - T_{B} = m_{B} \; \ddot{z} $$ This equations can be reduced to: $$ \ddot{r} = \frac{\frac{L^{2}}{m_{A} \; r^{3}} - m_{B} \; g}{m_{A} + m_{B}}$$ Where: $$ L = m_{A} \; r^{2} \; \dot{\theta} $$ As $L$ is a constant of motion, we concentrate on differential equation for $r$. As it is an equation of second order, we have to convert it to a system of first order equations. Define: $$ \dot{r} = v_{r} $$ $$ \dot{v}_{r} = \frac{L^{2}}{m_{A} \; (m_{A} + m_{B})} \frac{1}{r^{3}} - \frac{m_{B}}{m_{A} + m_{B}} g $$ Where we are working in space $( \; r \; , \; v_{r} \;) \equiv ( \; r \; , \; \dot{r} \;) $ ``` #Libraries import numpy as np import matplotlib.pyplot as plt from scipy.integrate import odeint # For simulation from matplotlib import animation, rc from IPython.display import HTML # Functions for solving differential equations and to show fluxes in phase portrait def fr(vr): return vr def fvr(L, g, mA, mB, r): return L**2 / (mA*(mA + mB)) * 1/r**3 - mB/(mA + mB) * g def dydt(y, t, L, g, mA, mB): r, vr, theta = y dr = fr(vr) dvr = fvr(L, g, mA, mB, r) dtheta = L /(mA * r**2) return [dr, dvr, dtheta] mA = 1 mB = 1 r0 = 3 vr0 = 0 theta0 = 0 omega0 = 2 g = 9.8 L = mA * r0**2 * omega0 print('L = ', L) t = np.linspace(0, 30, 1000) y0 = [r0, vr0, theta0] sol = odeint(dydt, y0, t, args=(L, g, mA, mB)) r = sol[:, 0] vr = sol[:, 1] theta = sol[:, 2] plt.close() plt.title('Time series') plt.plot(t, r, 'b', label='r(t)') plt.plot(t, vr, 'g', label='vr(t)') plt.legend(loc='best') plt.xlabel('t') plt.grid() plt.show() plt.close() plt.title('On the table') plt.plot(r*np.cos(theta), r*np.sin(theta),"--") plt.xlabel('y') plt.ylabel('x') plt.grid() plt.show() # First set up the figure, the axis, and the plot element we want to animate fig, ax = plt.subplots() ax.set_xlim(( -4, 4)) ax.set_ylim((-4, 4)) point, = ax.plot([], []) # initialization function: plot the background of each frame def init(): x0 = r0*np.cos(theta0) y0 = r0*np.sin(theta0) point.set_data(x0, y0) return (point,) # animation function. This is called sequentially def animate(i): x = r[:i]*np.cos(theta[:i]) y = r[:i]*np.sin(theta[:i]) point.set_data(x, y) return (point,) # call the animator. blit=True means only re-draw the parts that have changed. anim = animation.FuncAnimation(fig, animate, init_func=init, frames=1000, interval=20, blit=True) HTML(anim.to_html5_video()) # x = r, y = vr w = 10 Y, X = np.mgrid[-w:w:100j, 0:w:100j] Vx = fr(Y) Vy = fvr(L, g, mA, mB, X) speed = np.sqrt(Vx*Vx + Vy*Vy) plt.close() fig, ax = plt.subplots(figsize=(6,8)) strm = ax.streamplot(X, Y, Vx, Vy, color=speed/speed.max(), linewidth=2, cmap=plt.cm.autumn) fig.colorbar(strm.lines) ax.grid() plt.show() ```
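Since the derivation above relies on $L$ being a constant of motion, a quick numerical sanity check is to evaluate $L(t)$ and the total energy along the `odeint` solution; both should stay (numerically) constant. The energy expression below is my own rearrangement, consistent up to an additive constant with the equation for $\ddot{r}$ given earlier:

$$ E = \frac{1}{2}(m_{A}+m_{B})\,\dot{r}^{2} + \frac{L^{2}}{2 \, m_{A} \, r^{2}} + m_{B} \, g \, r $$

The sketch reuses the arrays `r`, `vr`, `theta`, `t` and the parameters defined above; treat it as a check, not part of the original solution.

```
# Sanity check: angular momentum and energy should be (numerically) conserved.
# Reuses r, vr, theta, t and the parameters mA, mB, g, L defined above.
theta_dot_num = np.gradient(theta, t)                      # numerical d(theta)/dt
L_num = mA * r**2 * theta_dot_num                          # should stay close to L
E_num = 0.5*(mA + mB)*vr**2 + L**2/(2*mA*r**2) + mB*g*r    # conserved up to a constant

print('max |L_num - L| :', np.max(np.abs(L_num - L)))
print('max energy drift:', np.max(np.abs(E_num - E_num[0])))

plt.close()
plt.title('Conservation check')
plt.plot(t, E_num - E_num[0], label='E(t) - E(0)')
plt.plot(t, L_num - L, label='L(t) - L')
plt.xlabel('t')
plt.legend(loc='best')
plt.grid()
plt.show()
```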
Deep Learning ============= Assignment 1 ------------ The objective of this assignment is to learn about simple data curation practices, and familiarize you with some of the data we'll be reusing later. This notebook uses the [notMNIST](http://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html) dataset to be used with python experiments. This dataset is designed to look like the classic [MNIST](http://yann.lecun.com/exdb/mnist/) dataset, while looking a little more like real data: it's a harder task, and the data is a lot less 'clean' than MNIST. ``` # These are all the modules we'll be using later. Make sure you can import them # before proceeding further. from __future__ import print_function import matplotlib.pyplot as plt import numpy as np import os import sys import tarfile from IPython.display import display, Image from scipy import ndimage from sklearn.linear_model import LogisticRegression from six.moves.urllib.request import urlretrieve from six.moves import cPickle as pickle # Need to use matplotlib inline to produce plots inside jupyter notebook %matplotlib inline ``` First, we'll download the dataset to our local machine. The data consists of characters rendered in a variety of fonts on a 28x28 image. The labels are limited to 'A' through 'J' (10 classes). The training set has about 500k and the testset 19000 labelled examples. Given these sizes, it should be possible to train models quickly on any machine. ``` !pwd ``` <h3 align='Middle'>The following cell will not run without internet connection. Be sure to acquire the files in another way</h3> ``` url = 'http://commondatastorage.googleapis.com/books1000/' last_percent_reported = None def download_progress_hook(count, blockSize, totalSize): """A hook to report the progress of a download. This is mostly intended for users with slow internet connections. Reports every 1% change in download progress. """ global last_percent_reported percent = int(count * blockSize * 100 / totalSize) if last_percent_reported != percent: if percent % 5 == 0: sys.stdout.write("%s%%" % percent) sys.stdout.flush() else: sys.stdout.write(".") sys.stdout.flush() last_percent_reported = percent def maybe_download(filename, expected_bytes, force=False): """Download a file if not present, and make sure it's the right size.""" if force or not os.path.exists(filename): print('Attempting to download:', filename) filename, _ = urlretrieve(url + filename, filename, reporthook=download_progress_hook) print('\nDownload Complete!') statinfo = os.stat(filename) if statinfo.st_size == expected_bytes: print('Found and verified', filename) else: raise Exception('Failed to verify ' + filename + '. Can you get to it with a browser?') return filename train_filename = maybe_download('notMNIST_large.tar.gz', 247336696) test_filename = maybe_download('notMNIST_small.tar.gz', 8458043) ``` Extract the dataset from the compressed .tar.gz file. This should give you a set of directories, labelled A through J. ``` num_classes = 10 np.random.seed(133) def maybe_extract(filename, force=False): root = os.path.splitext(os.path.splitext(filename)[0])[0] # remove .tar.gz if os.path.isdir(root) and not force: # You may override by setting force=True. print('%s already present - Skipping extraction of %s.' % (root, filename)) else: print('Extracting data for %s. This may take a while. Please wait.' 
% root) tar = tarfile.open(filename) sys.stdout.flush() tar.extractall() tar.close() data_folders = [os.path.join(root, d) for d in sorted(os.listdir(root)) if os.path.isdir(os.path.join(root, d))] if len(data_folders) != num_classes: raise Exception( 'Expected %d folders, one per class. Found %d instead.' % ( num_classes, len(data_folders))) print(data_folders) return data_folders train_folders = maybe_extract(train_filename) test_folders = maybe_extract(test_filename) ``` --- Problem 1 --------- Let's take a peek at some of the data to make sure it looks sensible. Each exemplar should be an image of a character A through J rendered in a different font. Display a sample of the images that we just downloaded. Hint: you can use the package IPython.display. --- ``` import random import hashlib %matplotlib inline def disp_samples(data_folders, sample_size): for folder in data_folders: print(folder) image_files = os.listdir(folder) image_sample = random.sample(image_files, sample_size) for image in image_sample: image_file = os.path.join(folder, image) i = Image(filename=image_file) display(i) disp_samples(train_folders, 1) disp_samples(test_folders, 1) ``` Now let's load the data in a more manageable format. Since, depending on your computer setup you might not be able to fit it all in memory, we'll load each class into a separate dataset, store them on disk and curate them independently. Later we'll merge them into a single dataset of manageable size. We'll convert the entire dataset into a 3D array (image index, x, y) of floating point values, normalized to have approximately zero mean and standard deviation ~0.5 to make training easier down the road. A few images might not be readable, we'll just skip them. ``` image_size = 28 # Pixel width and height. pixel_depth = 255.0 # Number of levels per pixel. def load_letter(folder, min_num_images): """Load the data for a single letter label.""" image_files = os.listdir(folder) dataset = np.ndarray(shape=(len(image_files), image_size, image_size), dtype=np.float32) print(folder) num_images = 0 for image in image_files: image_file = os.path.join(folder, image) try: image_data = (ndimage.imread(image_file).astype(float) - pixel_depth / 2) / pixel_depth if image_data.shape != (image_size, image_size): raise Exception('Unexpected image shape: %s' % str(image_data.shape)) dataset[num_images, :, :] = image_data num_images = num_images + 1 except IOError as e: print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.') dataset = dataset[0:num_images, :, :] if num_images < min_num_images: raise Exception('Many fewer images than expected: %d < %d' % (num_images, min_num_images)) print('Full dataset tensor:', dataset.shape) print('Mean:', np.mean(dataset)) print('Standard deviation:', np.std(dataset)) return dataset def maybe_pickle(data_folders, min_num_images_per_class, force=False): dataset_names = [] for folder in data_folders: set_filename = folder + '.pickle' dataset_names.append(set_filename) if os.path.exists(set_filename) and not force: # You may override by setting force=True. print('%s already present - Skipping pickling.' % set_filename) else: print('Pickling %s.' 
% set_filename) dataset = load_letter(folder, min_num_images_per_class) try: with open(set_filename, 'wb') as f: pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL) except Exception as e: print('Unable to save data to', set_filename, ':', e) return dataset_names train_datasets = maybe_pickle(train_folders, 45000) test_datasets = maybe_pickle(test_folders, 1800) ``` <h3 align="left"> Display Sample Image </h3> ``` def ipython_display_samples(folders): print (folders) for i in folders: sample = np.random.choice(os.listdir(i), 1)[0] display(Image(os.path.join(i, sample))) # train folders ipython_display_samples(train_folders) # test folders ipython_display_samples(test_folders) ``` --- Problem 2 --------- Let's verify that the data still looks good. Displaying a sample of the labels and images from the ndarray. Hint: you can use matplotlib.pyplot. --- ``` def matplotlib_pyplot_samples(pkls): for idx, pkl in enumerate(pkls): with open(pkl, 'r') as f: dataset = pickle.load(f) sample = np.random.choice(len(dataset), 5) plt.figure(idx+1) for i, k in enumerate(sample): plt.subplot(1,5,i+1) plt.imshow(dataset[k]) matplotlib_pyplot_samples(train_datasets) ``` --- Problem 3 --------- Another check: we expect the data to be balanced across classes. Verify that. --- ``` def balance_check(pkls): for pkl in pkls: with open(pkl, 'r') as f: dataset = pickle.load(f) print (pkl, len(dataset)) balance_check(train_datasets) balance_check(test_datasets) ``` Merge and prune the training data as needed. Depending on your computer setup, you might not be able to fit it all in memory, and you can tune `train_size` as needed. The labels will be stored into a separate array of integers 0 through 9. Also create a validation dataset for hyperparameter tuning. ``` def make_arrays(nb_rows, img_size): if nb_rows: dataset = np.ndarray((nb_rows, img_size, img_size), dtype=np.float32) labels = np.ndarray(nb_rows, dtype=np.int32) else: dataset, labels = None, None return dataset, labels def merge_datasets(pickle_files, train_size, valid_size=0): num_classes = len(pickle_files) valid_dataset, valid_labels = make_arrays(valid_size, image_size) train_dataset, train_labels = make_arrays(train_size, image_size) vsize_per_class = valid_size // num_classes tsize_per_class = train_size // num_classes start_v, start_t = 0, 0 end_v, end_t = vsize_per_class, tsize_per_class end_l = vsize_per_class+tsize_per_class for label, pickle_file in enumerate(pickle_files): try: with open(pickle_file, 'rb') as f: letter_set = pickle.load(f) # let's shuffle the letters to have random validation and training set np.random.shuffle(letter_set) if valid_dataset is not None: valid_letter = letter_set[:vsize_per_class, :, :] valid_dataset[start_v:end_v, :, :] = valid_letter valid_labels[start_v:end_v] = label start_v += vsize_per_class end_v += vsize_per_class train_letter = letter_set[vsize_per_class:end_l, :, :] train_dataset[start_t:end_t, :, :] = train_letter train_labels[start_t:end_t] = label start_t += tsize_per_class end_t += tsize_per_class except Exception as e: print('Unable to process data from', pickle_file, ':', e) raise return valid_dataset, valid_labels, train_dataset, train_labels train_size = 200000 valid_size = 10000 test_size = 10000 valid_dataset, valid_labels, train_dataset, train_labels = merge_datasets( train_datasets, train_size, valid_size) _, _, test_dataset, test_labels = merge_datasets(test_datasets, test_size) print('Training:', train_dataset.shape, train_labels.shape) print('Validation:', valid_dataset.shape, 
valid_labels.shape) print('Testing:', test_dataset.shape, test_labels.shape) ``` Next, we'll randomize the data. It's important to have the labels well shuffled for the training and test distributions to match. ``` def randomize(dataset, labels): permutation = np.random.permutation(labels.shape[0]) shuffled_dataset = dataset[permutation,:,:] shuffled_labels = labels[permutation] return shuffled_dataset, shuffled_labels train_dataset, train_labels = randomize(train_dataset, train_labels) test_dataset, test_labels = randomize(test_dataset, test_labels) valid_dataset, valid_labels = randomize(valid_dataset, valid_labels) ``` --- Problem 4 --------- Convince yourself that the data is still good after shuffling! --- To be sure that the data are still fine after the merger and the randomization, I will select one item and display the image alongside the label. Note: 0 = A, 1 = B, 2 = C, 3 = D, 4 = E, 5 = F, 6 = G, 7 = H, 8 = I, 9 = J. ``` pretty_labels = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E', 5: 'F', 6: 'G', 7: 'H', 8: 'I', 9: 'J'} def disp_sample_dataset(dataset, labels): items = random.sample(range(len(labels)), 8) for i, item in enumerate(items): plt.subplot(2, 4, i+1) plt.axis('off') plt.title(pretty_labels[labels[item]]) plt.imshow(dataset[item]) disp_sample_dataset(train_dataset, train_labels) disp_sample_dataset(valid_dataset, valid_labels) disp_sample_dataset(test_dataset, test_labels) ``` Finally, let's save the data for later reuse: ``` pickle_file = 'notMNIST.pickle' try: f = open(pickle_file, 'wb') save = { 'train_dataset': train_dataset, 'train_labels': train_labels, 'valid_dataset': valid_dataset, 'valid_labels': valid_labels, 'test_dataset': test_dataset, 'test_labels': test_labels, } pickle.dump(save, f, pickle.HIGHEST_PROTOCOL) f.close() except Exception as e: print('Unable to save data to', pickle_file, ':', e) raise statinfo = os.stat(pickle_file) print('Compressed pickle size:', statinfo.st_size) ``` --- Problem 5 --------- By construction, this dataset might contain a lot of overlapping samples, including training data that's also contained in the validation and test set! Overlap between training and test can skew the results if you expect to use your model in an environment where there is never an overlap, but are actually ok if you expect to see training samples recur when you use it. Measure how much overlap there is between training, validation and test samples. Optional questions: - What about near duplicates between datasets? (images that are almost identical) - Create a sanitized validation and test set, and compare your accuracy on those in subsequent assignments. --- ``` # I dont think doing number 5 is currently useful """ pickle_file = 'notMNIST2.pickle' try: f = open(pickle_file, 'wb') save = { 'train_dataset': train_dataset, 'train_labels': train_labels, 'valid_dataset': valid_dataset, 'valid_labels': valid_labels, 'test_dataset': test_dataset, 'test_labels': test_labels, 'new_valid_dataset': new_valid_dataset, 'new_valid_labels': new_valid_labels, 'new_test_dataset': new_test_dataset, 'new_test_labels': new_test_labels, } pickle.dump(save, f, pickle.HIGHEST_PROTOCOL) f.close() except Exception as e: print('Unable to save data to', pickle_file, ':', e) raise """ ``` --- Problem 6 --------- Let's get an idea of what an off-the-shelf classifier can give you on this data. It's always good to check that there is something to learn, and that it's a problem that is not so trivial that a canned solution solves it. 
Train a simple model on this data using 50, 100, 1000 and 5000 training samples. Hint: you can use the LogisticRegression model from sklearn.linear_model. Optional question: train an off-the-shelf model on all the data! --- ``` regr = LogisticRegression() X_test = test_dataset.reshape(test_dataset.shape[0], 28 * 28) y_test = test_labels ``` <h3 align='Left'>Note on the Following Training Sample Code</h3> The following code uses score which returns the mean accuracy on the given test data and labels. In multi-label classification (which is what question 6 is doing), the subset accuracy is a harsh metric since each label needs to be correctly predicted. Later on in the jupyter notebook, there will be code showing on which class the model is consistently predicting incorrectly. This is called the confusion matrix. http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html <h3 align='Left'>50 Training Samples</h3> ``` from sklearn import metrics sample_size = 50 X_train = train_dataset[:sample_size].reshape(sample_size, 784) y_train = train_labels[:sample_size] %time regr.fit(X_train, y_train) #regr.score(X_test, y_test) predicted = regr.predict(X_test) cm = metrics.confusion_matrix(y_test, predicted) cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] import pandas as pd import seaborn as sns plt.figure(figsize=(9,9)) confusion_matrix = pd.DataFrame(data = cm_normalized) sns.heatmap(confusion_matrix, annot=True, fmt=".3f", linewidths=.5, square = True, cmap = 'Blues_r'); plt.ylabel('Actual label'); plt.xlabel('Predicted label'); plt.title('Confusion Matrix', size = 15); fig, axes = plt.subplots(nrows = 1, ncols = 3, figsize = (27,81)); sample_size = 50 X_train = train_dataset[:sample_size].reshape(sample_size, 784) y_train = train_labels[:sample_size] regr.fit(X_train, y_train) fifty_score = regr.score(X_test, y_test) predicted = regr.predict(X_test) cm = metrics.confusion_matrix(y_test, predicted) cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] confusion_matrix = pd.DataFrame(data = cm_normalized) sns.heatmap(confusion_matrix, annot=True, fmt=".3f", linewidths=.5, square = True, cmap = 'Blues_r', ax = axes[0], cbar = False); axes[0].set_ylabel('Actual label', fontsize = 45); sample_size = 100 X_train = train_dataset[:sample_size].reshape(sample_size, 784) y_train = train_labels[:sample_size] regr.fit(X_train, y_train) hundred_score = regr.score(X_test, y_test) predicted = regr.predict(X_test) cm = metrics.confusion_matrix(y_test, predicted) cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] confusion_matrix = pd.DataFrame(data = cm_normalized) sns.heatmap(confusion_matrix, annot=True, fmt=".3f", linewidths=.5, square = True, cmap = 'Blues_r', ax = axes[1], cbar = False); axes[1].set_xlabel('Predicted label', fontsize = 45); sample_size = 5000 X_train = train_dataset[:sample_size].reshape(sample_size, 784) y_train = train_labels[:sample_size] regr.fit(X_train, y_train) five_thousand_score = regr.score(X_test, y_test) predicted = regr.predict(X_test) cm = metrics.confusion_matrix(y_test, predicted) cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] confusion_matrix = pd.DataFrame(data = cm_normalized) sns.heatmap(confusion_matrix, annot=True, fmt=".3f", linewidths=.5, square = True, cmap = 'Blues_r', ax = axes[2], cbar = False); plt.tight_layout() fifty = '50 Training Samples Score: {0}'.format(fifty_score) hundred = '100 Training Samples Score: {0}'.format(hundred_score) thousand = '5000 
Training Samples Score: {0}'.format(five_thousand_score) axes[0].set_title(fifty, size = 30); axes[1].set_title(hundred, size = 30); axes[2].set_title(thousand, size = 30); fig.suptitle('Confusion Matrices', y=0.568, size = 55); ``` <h3 align='Left'>All Training Samples</h3> ``` regr2 = LogisticRegression(solver='sag') sample_size = len(train_dataset) X_train = train_dataset[:sample_size].reshape(sample_size, 784) y_train = train_labels[:sample_size] %time regr2.fit(X_train, y_train) regr2.score(X_test, y_test) pred_labels = regr2.predict(X_test) disp_sample_dataset(test_dataset, pred_labels) ```
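Circling back to Problem 5, which is left commented out above: one crude way to measure exact overlap between the splits is to hash every 28x28 image and intersect the hash sets. This is only a sketch, reusing the `hashlib` import from earlier; it catches byte-identical duplicates only, not near-duplicates, and it is not the assignment's reference solution.

```
# Rough overlap measurement for Problem 5: hash each image's bytes and intersect the sets.
# Only exact (byte-identical) duplicates are detected.
def hash_images(dataset):
    return set(hashlib.sha1(img.tobytes()).hexdigest() for img in dataset)

train_hashes = hash_images(train_dataset)
valid_hashes = hash_images(valid_dataset)
test_hashes = hash_images(test_dataset)

print('train/valid overlap:', len(train_hashes & valid_hashes))
print('train/test overlap :', len(train_hashes & test_hashes))
print('valid/test overlap :', len(valid_hashes & test_hashes))
```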
<style type="text/css"> </style> <b><center> <span style="font-size: 24pt; line-height: 1.2"> COMS W4111-002 (Spring 2021)<br>Introduction to Databases </span> </center></b> </span><br> <p> <i><center> <span style="font-size: 20pt; line-height: 1.2"> Homework 1: Programming - 10 Points </span> </center></i> __Note:__ Please replace the information below with your last name, first name and UNI.<br><br> <i> <span style="font-size: 20pt; line-height: 1.2"; > LastName_FirstName, UNI </span> </i> ## Introduction ### Objectives This homework has you practice and build skill with: - PART A: (1 point) Understanding relational databases - PART B: (1 point) Understanding relational algebra - PART C: (1 point) Cleaning data - PART D: (1 point) Performing simple SQL queries to analyze the data. - PART E: (6 points) CSVDataTable.py __Note:__ The motivation for PART E may not be clear. The motivation will become clearer as the semester proceeds. The purpose of PART E is to get you started on programming in Python and manipulating data. ### Submission 1. File > Download as > PDF via latex (.PDF) (Use the "File" menu option on the Jupyter notebook tool bar, not the option on the browser tool bar). 2. Upload .pdf and .ipynb to GradeScope 3. Upload CSVDataTable.py and CSVDataTable_Tests.py **This assignment is due January 29, 11:59 pm EDT** ### Collaboration - You may use any information you get in TA or Prof. Ferguson's office hours, from lectures or from recitations. - You may use information that you find on the web. - You are NOT allowed to collaborate with other students outside of office hours. # Part A: Written 1. What is a database management system? <i> Your answer here </i> 2. What is a foreign key? <i> Your answer here </i> 3. What is a primary key? <i> Your answer here </i> 4. What are 4 different types of DBMS relationships, give a brief explanaition for each? <i> Your answer here </i> 5. What is an ER model? <i> Your answer here </i> 6. Using Lucidchart draw an example of a logical ER model using Crow's Foot notation for Columbia classes. The entity types are: - Students, Professors, and Classes. - The relationships are: - A Class has exactly one Professor. - A Student has exactly one professor who is an _advisor._ - A Professor may advise 0, 1 or many Students. - A Class has 0, 1 or many enrolled students. - A Student enrolls in 0, 1 or many Classes. - You can define what you think are common attributes for each of the entity types. Do not define more than 5 or 6 attributes per entity type. - In this example, explicitly show an example of a primary-key, foreign key, one-to-many relationship, and many-to-many relationship. __Notes:__ - If you have not already done so, please register for a free account at Lucidchart.com. You can choose the option at the bottom of the left pane to add the ER diagram shapes. - You can take a screen capture of you diagram and save in the zip directory that that contains you Jupyter notebook. Edit the following cell and replace "Boromir.jpg" with the name of the file containing your screenshot. <i> Use the following line to upload a photo of your Luicdchart. <i/> <img src="Boromir.jpg"> # Part B: Relational Algebra You will use [the online relational calculator](https://dbis-uibk.github.io/relax/landing), choose the “Karlsruhe University of Applied Sciences” dataset. An anti-join is a form of join with reverse logic. 
Instead of returning rows when there is a match (according to the join predicate) between the left and right side, an anti-join returns those rows from the left side of the predicate for which there is no match on the right. The Anti-Join Symbol is ▷. Consider the following relational algebra expression and result. /* (1) Set X = The set of classrooms in buildings Taylor or Watson. */ X = σ building='Watson' ∨ building='Taylor' (classroom) /* (2) Set Y = The Anti-Join of department and X */ Y = (department ▷ X) /* (3) Display the rows in Y. */ Y <img src="ra.png"> 1. Find an alternate expression to (2) that computes the correct answer given X. Display the execution of your query below. <i>Your screenshot here </i> # Part C: Data Clean Up ## Please note: You MUST make a new schema using the lahmansdb_to_clean.sql file provided in the data folder. Use thelahmansdb_to_clean.sql file to make a new schema containing the raw data. The lahman database you created in Homework 0 has already been cleaned with all the constraints and will be used for Part D. Knowing how to clean data and add integrity constraints is very important which is why you go through the steps in part C. TLDR: If you use the HW0 lahman schema for this part you will get a lot of errors and recieve a lot of deductions. ``` # You will need to follow instructions from HW 0 to make a new schema, import the data. # Connect to the unclean schema below by setting the database host, user ID and password. %load_ext sql %sql mysql+pymysql://dbuser:dbuserdbuser@localhost/lahmansdb_to_clean ``` Data cleanup: For each table we want you to clean, we have provided a list of changes you have to make. You can reference the cleaned lahman db for inspiration and guidance, but know that there are different ways to clean the data and you will be graded for your choice rationalization. You should make these changes through DataGrip's workbench's table editor and/or using SQL queries. In this part you will clean two tables: People and Batting. ### You must have: - A brief explanation of why you think the change we requested is important. - What change you made to the table. - Any queries you used to make the changes, either the ones you wrote or the Alter statements provided by DataGrip's editor. - Executed the test statements we provided - The cleaned table's new create statement (after you finish all the changes) ### Overview of Changes: People Table 0. Primary Key (Explanation is given, but you still must add the key to your table yourself) 1. Empty strings to NULLs 2. Column typing 3. isDead column 4. deathDate and birthDate column Batting Table 1. Empty strings to NULLs 2. Column typing 3. Primary Key 4. Foreign Key ### How to make the changes: __Using the Table Editor:__ When you hit apply, a popup will open displaying the ALTER statments sql generates. Copy the sql provided first and paste it into this notebook. Then you can apply the changes. This means that you are NOT executing the ALTER statements through your notebook. 1. Right click on the table > Modify Table... <img src="modify.png" width="400" height="800"> 2. Keys > press the + button > input the parameters > Execute OR Keys > press the + button > input the parameters > copy and paste the script generated under "SQL Script" and paste into your notebook > Run the cell in jupyter notebook <img src="pk.png" width="600" height="1200"> __Using sql queries:__ Copy paste any queries that you write manually into the notebook as well! 
<hr style="height:2px"> ## People Table ### 0) EXAMPLE: Add a Primary Key (Solutions are given but make sure you still do this step in workbench!) #### Explanation We want to add a Primary Key because we want to be able to uniquely identify rows within our data. A primary key is also an index, which allows us to locate data faster. #### Change I added a Primary Key on the playerID column and made the datatype VARCHAR(15) __Note:__ This is for demonstration purposes only. playerID __is not__ a primary key for fielding. #### SQL ~~~~sql ALTER TABLE `lahmansdb_to_clean`.`people` CHANGE COLUMN `playerID` `playerID` VARCHAR(15) NOT NULL , ADD PRIMARY KEY (`playerID`); ~~~~ #### Tests ``` %sql SHOW KEYS FROM people WHERE Key_name = 'PRIMARY' ``` ### 1) Convert all empty strings to NULL #### Explanation _Put your answer in this cell_ #### Change _Put your answer in this cell_ #### SQL ~~~~sql Put your answer in this cell ~~~~ #### Tests ``` %sql SELECT * FROM people WHERE birthState = "" ``` ### 2) Change column datatypes to appropriate values (ENUM, INT, VARCHAR, DATETIME, ETC) #### Explanation _Put your answer in this cell_ #### Change _Put your answer in this cell_ #### SQL ~~~~sql Put your answer in this cell ~~~~ ### 3) Add an isDead Column that is either 'Y' or 'N' - Some things to think of: What data type should this column be? How do you know if the player is dead or not? Maybe you do not know if the player is dead. - You do not need to make guesses about life spans, etc. Just apply a simple rule. 'Y' means the player is dead 'N' means the player is alive #### Explanation _Put your answer in this cell_ #### Change _Put your answer in this cell_ #### SQL ~~~~sql Put your answer in this cell ~~~~ #### Tests ``` %sql SELECT * FROM people WHERE isDead = "N" limit 10 ``` ### 4) Add a deathDate and birthDate column Some things to think of: What do you do if you are missing information? What datatype should this column be? You have to create this column from other columns in the table. #### Explanation _Put your answer in this cell_ #### Change _Put your answer in this cell_ #### SQL ~~~~sql Put your answer in this cell ~~~~ #### Tests ``` %sql SELECT deathDate FROM people WHERE deathDate >= '2005-01-01' ORDER BY deathDate ASC LIMIT 10; %sql SELECT birthDate FROM people WHERE birthDate <= '1965-01-01' ORDER BY birthDate ASC LIMIT 10; ``` ### Final CREATE Statement To find the create statement: - Right click on the table name in workbench - Select 'Copy to Clipboard' - Select 'Create Statement' The create statement will now be copied into your clipboard and can be pasted into the cell below. 
~~~~sql Put your answer in this cell ~~~~ <hr style="height:2px"> ## Batting Table ### 1) Convert all empty strings to NULL #### Explanation _Put your answer in this cell_ #### Change _Put your answer in this cell_ #### SQL ~~~~sql Put your answer in this cell ~~~~ #### Tests ``` %sql SELECT count(*) FROM lahman2019clean.batting where RBI is NULL; ``` ### 2) Change column datatypes to appropriate values (ENUM, INT, VARCHAR, DATETIME, ETC) #### Explanation _Put your answer in this cell_ #### Change _Put your answer in this cell_ #### SQL ~~~~sql Put your answer in this cell ~~~~ ### 3) Add a Primary Key Two options for the Primary Key: - Composite Key: playerID, yearID, stint - Covering Key (Index): playerID, yearID, stint, teamID #### Choice _Put your answer in this cell_ #### Explanation _Put your answer in this cell_ #### SQL ~~~~sql Put your answer in this cell ~~~~ #### Test ``` %sql SHOW KEYS FROM batting WHERE Key_name = 'PRIMARY' and Column_name = 'playerID' ``` ### 4) Add a foreign key on playerID between the People and Batting Tables Note: Two people in the batting table do not exist in the people table. How should you handle this issue? #### Explanation _Put your answer in this cell_ #### Change _Put your answer in this cell_ #### SQL ~~~~sql Put your answer in this cell ~~~~ #### Tests ``` %sql Select playerID from batting where playerID not in (select playerID from people); ``` ### Final CREATE Statement To find the create statement: - Right click on the table name in workbench - Select 'Copy to Clipboard' - Select 'Create Statement' The create statement will now be copied into your clipboard and can be pasted into the cell below. ~~~~sql Put your answer in this cell ~~~~ # Part D: SQL Queries NOTE: You must use the CLEAN lahman schema provided in HW0 for the queries below to ensure your answers are consistent with the solutions. ## Question 0 What is the highest salary in baseball history? ## Question 1 Create a Table of all players with a first name of John who were born in the United States and played at Fordham university. Include their first name, last name, playerID, and birth state. <i> Hint: Use a Join between People and CollegePlaying </i> ``` %sql select * from JOHNS ``` ## Question 2 Update all entries with full_name Columbia University to 'Columbia University!' in the Schools table. Then select the row. # Part E: CSVDataTable ## i. Conceptual Questions The purpose of this homework is to teach you the behaviour of SQL Databases by asking you to implement functions that will model the behaviour of a real database with CSVDataTable. You will mimic a SQL Database using CSV files. Read through the scaffolding code provided in the CSVDataTable folder first to understand and answer the following conceptual questions. 1. Given this SQL statement: SELECT nameFirst, nameLast FROM people WHERE playerID = collied01 If you run find_by_primary_key() on this statement, what are key_fields and field_list? <i> Your answer here </i> 2. What should be checked when you are trying to INSERT a new row into a table with a PK? <i> Your answer here </i> 3. What should be checked when you are trying to UPDATE a row in a table with a PK? <i> Your answer here </i> ## ii. Coding You are responsible for implementing and testing two classes in Python: CSVDataTable, BaseDataTable. The python files and data can be found in the assignment under Courseworks. 
We have already given you **find_by_template(self, template, field_list=None, limit=None, offset=None, order_by=None)** Use this as a jumping off point for the rest of your functions. Methods to complete: CSVDataTable.py - find_by_primary_key(self, key_fields, field_list=None) - delete_by_key(self, key_fields) - delete_by_template(self, template) - update_by_key(self, key_fields, new_values) - update_by_template(self, template, new_values) - insert(self, new_record) CSV_table_tests.py - You must test all methods. You will have to write these tests yourself. - You must test your methods on the People and Batting table. If you do not include tests and tests outputs 50% of this section's points will be deducted at the start ## iii. Testing Please copy the text from the output of your tests and paste it below:
Generate test data for a pseudo Bill of Materials (BoM)

```
import numpy as np
import pandas as pd
import os

# Generate products
size_products = 21
products = []
for i in range(size_products):
    num = str(i)
    zeroes = 3 - len(num)
    serial = "000"[:zeroes] + num
    products.append("PROD_" + serial)

ProductCodes = pd.DataFrame(data = products, columns=["SKU"])

# Generate Layer 1 components
size_L1_components = 50
L1_components = []
for i in range(size_L1_components):
    num = str(i)
    zeroes = 3 - len(num)
    serial = "000"[:zeroes] + num
    L1_components.append("L1_" + serial)

# Generate Layer 2 components
size_L2_components = 50
L2_components = []
for i in range(size_L2_components):
    num = str(i)
    zeroes = 3 - len(num)
    serial = "000"[:zeroes] + num
    L2_components.append("L2_" + serial)

BoMStructure = pd.DataFrame(columns=["Parent", "Component", "QtyPer"])

# Product : Layer1
np.random.seed(0)
num_product_comps = np.random.randint(3,10, size=size_products)
np.random.seed(1)
L1_inds = np.random.randint(0,size_L1_components, size=sum(num_product_comps))
np.random.seed(2)
prod_qp_numerators = np.random.randint(1,10, size=sum(num_product_comps))
np.random.seed(3)
prod_qp_denominators = np.random.randint(1,10, size=sum(num_product_comps))
prod_qty_pers = np.around(prod_qp_numerators/prod_qp_denominators, 3)

prod_comp_ind = 0
for i in range(size_products):
    parent = products[i]
    num_comps = num_product_comps[i]
    for j in range(num_comps):
        component = L1_components[L1_inds[prod_comp_ind]]
        qty_per = prod_qty_pers[prod_comp_ind]
        prod_comp_ind += 1
        temp = pd.DataFrame(
            data=[[parent, component, qty_per]],
            columns=["Parent", "Component", "QtyPer"]
        )
        BoMStructure = BoMStructure.append(temp)

# Layer1 : Layer2
np.random.seed(4)
num_L1_comps = np.random.randint(3,10, size=size_L1_components)
np.random.seed(5)
L2_inds = np.random.randint(0,size_L2_components, size=sum(num_L1_comps))
np.random.seed(6)
L1_qp_numerators = np.random.randint(1,10, size=sum(num_L1_comps))
np.random.seed(7)
L1_qp_denominators = np.random.randint(1,10, size=sum(num_L1_comps))
L1_qty_pers = np.around(L1_qp_numerators/L1_qp_denominators, 3)

L1_comp_ind = 0
for i in range(size_L1_components):
    parent = L1_components[i]
    num_comps = num_L1_comps[i]
    for j in range(num_comps):
        component = L2_components[L2_inds[L1_comp_ind]]
        qty_per = L1_qty_pers[L1_comp_ind]
        L1_comp_ind += 1
        temp = pd.DataFrame(
            data=[[parent, component, qty_per]],
            columns=["Parent", "Component", "QtyPer"]
        )
        BoMStructure = BoMStructure.append(temp)

BoMStructure.to_csv(os.path.join(os.getcwd(), "data", "..", "Test_BoMStructure.csv"), index=False)
ProductCodes.to_csv(os.path.join(os.getcwd(), "data", "..", "Test_ProductSKUs.csv"), index=False)
```
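As a quick sanity check of the generated structure, the two layers can be "exploded" into product-to-L2 requirements by joining the product-to-L1 rows to the L1-to-L2 rows and multiplying the quantities. This is a sketch that assumes the `BoMStructure` DataFrame built above; the merge suffixes and the `TotalQtyPer` column name are introduced here purely for illustration.

```
# Hedged sketch: roll the two-level BoM up to product -> L2 requirements.
prod_l1 = BoMStructure[BoMStructure["Parent"].str.startswith("PROD_")]
l1_l2 = BoMStructure[BoMStructure["Parent"].str.startswith("L1_")]

# Join product->L1 rows to L1->L2 rows on the shared L1 component
exploded = prod_l1.merge(l1_l2, left_on="Component", right_on="Parent",
                         suffixes=("_L1", "_L2"))
exploded["TotalQtyPer"] = exploded["QtyPer_L1"].astype(float) * exploded["QtyPer_L2"].astype(float)

# Effective L2 quantity per product
rollup = (exploded.groupby(["Parent_L1", "Component_L2"])["TotalQtyPer"]
          .sum()
          .reset_index()
          .rename(columns={"Parent_L1": "Product", "Component_L2": "L2_Component"}))
print(rollup.head())
```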
<h1><center><span style="color:red">Sentiment Analysis using RNN</span></center></h1>

## Introduction to Notebook
<br>
In this notebook we will look at the problem of Sentiment Analysis.

## Objective of this Notebook
<br>
In this notebook I will design the model and execute the commands needed to prepare the training data and train it.

## In this notebook
<pre>
This notebook is divided into the below sections:

1. Data Cleaning and Preprocessing.

2. Exploratory Data Analysis (EDA) and Visualisation.

3. Feature Selection and Feature Engineering.

4. Model building.

5. Checking Accuracy
</pre>

## Contents in this Notebook
- [Form a Dataset](#lesson_1)
- [Developing a "Predictive Theory"](#lesson_2)
- [**1**: Quick Theory Validation](#project_1)
- [Transforming Text to Numbers](#lesson_3)
- [**2**: Creating the Input/Output Data](#project_2)
- [**3**: Building our Neural Network](#project_3)
- [Understanding Neural Noise](#lesson_4)
- [**4**: Making Learning Faster by Reducing Noise](#project_4)
- [Analyzing Inefficiencies in our Network](#lesson_5)
- [**5**: Making our Network Train and Run Faster](#project_5)
- [Further Noise Reduction](#lesson_6)
- [**6**: Reducing Noise by Strategically Reducing the Vocabulary](#project_6)
- [Analysis: What's going on in the weights?](#lesson_7)

## Hypothesis generation
<br>
This is one of the most important stages in any data science/machine learning pipeline. It involves understanding the problem in detail, which can impact the outcome. It is done by understanding the problem statement; in this problem, that means predicting whether a review carries positive or negative sentiment.

# Form a Dataset<a id='lesson_1'></a>

```
import numpy as np
import pandas as pd
from collections import Counter
```

**Note:** The data in `reviews.txt` we're using has already been preprocessed a bit and contains only lower case characters. If we were working from raw data, where we didn't know it was all lower case, we would want to add a step here to convert it. That's so we treat different variations of the same word, like `The`, `the`, and `THE`, all the same way.

```
def print_review_and_label(i):
    print(labels[i] + "\t:\t" + reviews[i][:80] + "...")

g = open('reviews.txt','r') # What we know!
reviews = list(map(lambda x:x[:-1],g.readlines()))
g.close()

g = open('labels.txt','r') # What we WANT to know!
labels = list(map(lambda x:x[:-1].upper(),g.readlines()))
g.close()

len(reviews)
len(labels)
reviews[0]
labels[0]
```

# Lesson: Develop a Predictive Theory<a id='lesson_2'></a>

```
print("labels.txt \t : \t reviews.txt\n")
print_review_and_label(2137)
print_review_and_label(500)
print_review_and_label(6267)
print_review_and_label(1500)
print_review_and_label(5265)
print_review_and_label(2000)
print_review_and_label(9267)
```

# Project 1: Quick Theory Validation<a id='project_1'></a>

We can find the [Counter](https://docs.python.org/2/library/collections.html#collections.Counter) class to be useful in this exercise, as well as the [numpy](https://docs.scipy.org/doc/numpy/reference/) library.

```
# Create three Counter objects to store positive, negative and total counts
positive_counts = Counter()
negative_counts = Counter()
total_counts = Counter()
```

**TODO:** Examine all the reviews.
For each word in a positive review, increase the count for that word in both your positive counter and the total words counter; likewise, for each word in a negative review, increase the count for that word in both your negative counter and the total words counter.

**Note:** Throughout these projects, you should use `split(' ')` to divide a piece of text (such as a review) into individual words. If you use `split()` instead, you'll get slightly different results than what the videos and solutions show.

```
# Loop over all the words in all the reviews and increment the counts in the appropriate counter objects
for i in range(len(reviews)):
    if(labels[i] == 'POSITIVE'):
        for word in reviews[i].split(" "):
            positive_counts[word] += 1
            total_counts[word] += 1
    else:
        for word in reviews[i].split(" "):
            negative_counts[word] += 1
            total_counts[word] += 1

# Examine the counts of the most common words in positive reviews
positive_counts.most_common()

# Examine the counts of the most common words in negative reviews
negative_counts.most_common()
```

As we can see, common words like "the" appear very often in both positive and negative reviews. Instead of finding the most common words in positive or negative reviews, what you really want are the words found in positive reviews more often than in negative reviews, and vice versa. To accomplish this, you'll need to calculate the **ratios** of word usage between positive and negative reviews.

**TODO:** Check all the words you've seen and calculate the ratio of positive to negative uses and store that ratio in `pos_neg_ratios`.

>Hint: the positive-to-negative ratio for a given word can be calculated with `positive_counts[word] / float(negative_counts[word]+1)`. Notice the `+1` in the denominator – that ensures we don't divide by zero for words that are only seen in positive reviews.

```
pos_neg_ratios = Counter()

# Calculate the ratios of positive and negative uses of the most common words
# Consider words to be "common" if they've been used at least 100 times
for term,cnt in list(total_counts.most_common()):
    if(cnt > 100):
        pos_neg_ratio = positive_counts[term] / float(negative_counts[term]+1)
        pos_neg_ratios[term] = pos_neg_ratio

print("Pos-to-neg ratio for 'and' \t= \t{}".format(pos_neg_ratios["and"]))
print("Pos-to-neg ratio for 'the' \t= \t{}".format(pos_neg_ratios["the"]))
print("Pos-to-neg ratio for 'amazing' \t= \t{}".format(pos_neg_ratios["amazing"]))
print("Pos-to-neg ratio for 'good' \t= \t{}".format(pos_neg_ratios["good"]))
```

Looking closely at the values you just calculated, we see the following:

* Words that you would expect to see more often in positive reviews – like "amazing" – have a ratio greater than 1. The more skewed a word is toward positive, the farther from 1 its positive-to-negative ratio will be.
* Words that you would expect to see more often in negative reviews – like "terrible" – have positive values that are less than 1. The more skewed a word is toward negative, the closer to zero its positive-to-negative ratio will be.
* Neutral words, which don't really convey any sentiment because you would expect to see them in all sorts of reviews – like "the" – have values very close to 1. A perfectly neutral word – one that was used in exactly the same number of positive reviews as negative reviews – would be almost exactly 1. The `+1` we suggested you add to the denominator slightly biases words toward negative, but it won't matter because it will be a tiny bias and later we'll be ignoring words that are too close to neutral anyway. (A short sketch of this smoothing effect follows below.)
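To make the effect of the `+1` smoothing concrete, here is a tiny, purely hypothetical sketch; the counts below are invented for illustration and are not taken from the actual review data.

```
# Hypothetical counts, only to illustrate the +1 smoothing (not real data)
toy_positive = {"amazing": 1000, "the": 9000, "onlypos": 3}
toy_negative = {"amazing": 250, "the": 9000, "onlypos": 0}

for word in toy_positive:
    smoothed_ratio = toy_positive[word] / float(toy_negative[word] + 1)
    print(word, "->", round(smoothed_ratio, 4))

# "onlypos" never appears in negative reviews; without the +1 the division would
# raise ZeroDivisionError. With it, the word still gets a clearly positive ratio (3.0),
# while frequent words like "the" are barely affected (9000/9001 is about 0.9999).
```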
Ok, the ratios tell us which words are used more often in positive or negative reviews, but the specific values we've calculated are a bit difficult to work with. A very positive word like "amazing" has a value above 4, whereas a very negative word like "terrible" has a value around 0.18. Those values aren't easy to compare for a couple of reasons:

* Right now, 1 is considered neutral, but the absolute value of the positive-to-negative ratios of very positive words is larger than the absolute value of the ratios for the very negative words. So there is no way to directly compare two numbers and see if one word conveys the same magnitude of positive sentiment as another word conveys negative sentiment. So we should center all the values around neutral, so that a word's distance from the neutral value indicates how much sentiment (positive or negative) that word conveys.
* When comparing absolute values it's easier to do that around zero than one.

To fix these issues, we'll convert all of our ratios to new values using logarithms.

**TODO:** Go through all the ratios you calculated and convert them to logarithms. (i.e. use `np.log(ratio)`)

In the end, extremely positive and extremely negative words will have positive-to-negative ratios with similar magnitudes but opposite signs.

```
# Convert ratios to logs
for word,ratio in pos_neg_ratios.most_common():
    pos_neg_ratios[word] = np.log(ratio)

print("Pos-to-neg ratio for 'and' \t= \t{}".format(pos_neg_ratios["and"]))
print("Pos-to-neg ratio for 'the' \t= \t{}".format(pos_neg_ratios["the"]))
print("Pos-to-neg ratio for 'amazing' \t= \t{}".format(pos_neg_ratios["amazing"]))
print("Pos-to-neg ratio for 'good' \t= \t{}".format(pos_neg_ratios["good"]))
```

If everything worked, now you should see neutral words with values close to zero. In this case, "the" is near zero but slightly positive, so it was probably used in more positive reviews than negative reviews. But look at "amazing"'s ratio - it's above `1`, showing it is clearly a word with positive sentiment. And "terrible" has a similar score, but in the opposite direction, so it's below `-1`. It's now clear that both of these words are associated with specific, opposing sentiments.

Now run the following cells to see more ratios.

The first cell displays all the words, ordered by how associated they are with positive reviews. (Your notebook will most likely truncate the output so you won't actually see *all* the words in the list.)

The second cell displays the 30 words most associated with negative reviews by reversing the order of the first list and then looking at the first 30 words. (If you want the second cell to display all the words, ordered by how associated they are with negative reviews, you could just write `reversed(pos_neg_ratios.most_common())`.)

You should continue to see values similar to the earlier ones we checked – neutral words will be close to `0`, words will get more positive as their ratios approach and go above `1`, and words will get more negative as their ratios approach and go below `-1`. That's why we decided to use the logs instead of the raw ratios.
```
# words most frequently seen in a review with a "POSITIVE" label
pos_neg_ratios.most_common()

# words most frequently seen in a review with a "NEGATIVE" label
list(reversed(pos_neg_ratios.most_common()))[0:30]

# If we explore the documentation for the Counter class,
# we will see you could also find the 30 least common
# words like this:
pos_neg_ratios.most_common()[:-31:-1]
```

# Transforming Text into Numbers<a id='lesson_3'></a>

```
from IPython.display import Image

review = "This was a horrible, terrible movie."
Image(filename='Images/sentiment_network.png')

review = "The movie was excellent"
Image(filename='Images/sentiment_network_pos.png')
```

# Project 2: Creating the Input/Output Data<a id='project_2'></a>

**TODO:** Create a [set](https://docs.python.org/3/tutorial/datastructures.html#sets) named `vocab` that contains every word in the vocabulary.

```
vocab = set(total_counts.keys())
```

Run the following cell to check your vocabulary size. If everything worked correctly, it should print **74074**

```
vocab_size = len(vocab)
print(vocab_size)
```

Take a look at the following image. It represents the layers of the neural network you'll be building throughout this notebook. `layer_0` is the input layer, `layer_1` is a hidden layer, and `layer_2` is the output layer.

```
from IPython.display import Image
Image(filename='Images/sentiment_network_2.png')
```

**TODO:** Create a numpy array called `layer_0` and initialize it to all zeros. You will find the [zeros](https://docs.scipy.org/doc/numpy/reference/generated/numpy.zeros.html) function particularly helpful here. Be sure you create `layer_0` as a 2-dimensional matrix with 1 row and `vocab_size` columns.

```
layer_0 = np.zeros((1,vocab_size))
```

Run the following cell. It should display `(1, 74074)`

```
layer_0.shape

from IPython.display import Image
Image(filename='Images/sentiment_network.png')
```
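To connect `layer_0` back to the word counts, here is a minimal sketch of how the input layer could be populated for a single review. The `word2index` mapping and the `update_input_layer` helper are names introduced here for illustration; they are not defined earlier in this notebook.

```
# Map each vocabulary word to a column index in layer_0 (illustrative helper)
word2index = {word: i for i, word in enumerate(vocab)}

def update_input_layer(review):
    """Fill layer_0 with the word counts of a single review."""
    global layer_0
    layer_0 *= 0  # clear out the previous review's counts
    for word in review.split(" "):
        if word in word2index:
            layer_0[0][word2index[word]] += 1

update_input_layer(reviews[0])
print(layer_0)
```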
# Searching Through Hypothesis Space

```
%matplotlib inline
import numpy as np
import pandas
import sys
from plotnine import *

sys.path.append('..')
from plotting import plot_linear_classifier
from plotting import plot_classifier
```

## Toy dataset

```
data = pandas.read_pickle('../data/two_2dgaussians.pkl')
ggplot(data, aes(x='x', y='y', fill='label', shape='label'))+geom_point(size=3)
```

## Query by Committee (bagging)

```
def plot_linear_classifier_commitee(sample, data, lgs):
    gp = ggplot(sample, aes(x='x', y='y', fill='label', shape='label'))
    for lg in lgs:
        w0 = lg.intercept_[0]
        w1, w2 = lg.coef_[0]
        def boundary(x):
            return (-w0/w2)+(-w1/w2)*x
        gp = gp+geom_segment(x=data.x.min(),xend=data.x.max(),y=boundary(data.x.min()),yend=boundary(data.x.max()))
    return (gp+\
            geom_point(size=3)+\
            geom_point(data, aes(x='x',y='y',color='label')))

def train_commitee(lg, sample):
    # Train each committee member on a bootstrap sample (bagging),
    # resampling until both classes are represented
    for i in range(0,len(lg)):
        s = sample.sample(n = round(0.8*len(sample)), replace=True)
        while len(s.label[s.label==True])==0 or len(s.label[s.label==False])==0:
            s = sample.sample(n = round(0.8*len(sample)), replace=True)
        X = s[['x','y']]
        y = s.label
        lg[i].fit(X,y)

def soft_entropy(lgs, U):
    # Average the committee members' predicted probabilities for the positive class
    p = np.zeros(len(U))
    for lg in lgs:
        p = p + lg.predict_proba(U[['x','y']])[:,1]
    p = p/float(len(lgs))
    # entropy = -(p1 * np.log(p1) + p0 * np.log(p0)) -> for maximization, the distance from 0.5 is sufficient
    return -np.abs(p-0.5)

def aktive_learn(num_samples, initial_sample, data, commitee):
    sample = initial_sample
    labeled = initial_sample.index
    unlabeled = data.index[~data.index.isin(sample.index)]
    for i in range(0,num_samples+1):
        sample = data.loc[labeled]
        X = sample[['x','y']]
        y = sample.label
        train_commitee(commitee, sample)
        U = data.loc[unlabeled]
        entropy = soft_entropy(commitee, U)  # score the pool with the committee passed in (was the global lg)
        xstar_index = U.index[np.argmax(entropy)]
        labeled = labeled.insert(0,xstar_index)
        unlabeled = unlabeled.drop(xstar_index)
    return sample

from sklearn.linear_model import LogisticRegression

sample = data.sample(10)
C = 5
lg = [LogisticRegression() for i in range(0,C)]
train_commitee(lg, sample)
plot_linear_classifier_commitee(sample,data,lg)

sample = aktive_learn(20, sample, data, lg)
plot_linear_classifier_commitee(sample,data,lg)
```

## Query by Committee (with outliers)

```
data = pandas.read_pickle('../data/two_2dgaussians_with_outliers.pkl')
initial_sample = data.sample(10)
sample = aktive_learn(20, initial_sample, data, lg)
selected = data.loc[sample.index.difference(initial_sample.index)]
plot_linear_classifier_commitee(sample,data,lg)+geom_point(selected,aes(x='x',y='y'), size=0.1)
```

## Query by Committee, skewed data

```
data = pandas.read_pickle('../data/two_2dgaussians_skewed.pkl')
initial_sample = data[data.label==True].sample(1)
# note: DataFrame.append requires pandas < 2.0; on newer pandas use pandas.concat
initial_sample = initial_sample.append(data.sample(9))
sample = aktive_learn(20, initial_sample, data, lg)
selected = data.loc[sample.index.difference(initial_sample.index)]
plot_linear_classifier_commitee(sample,data,lg)+geom_point(selected,aes(x='x',y='y'), size=0.1)
```

## Query by Committee (general vs. specific)

```
def active_learn_gs(num_samples, initial_sample, data, data_bg, model_s, model_g):
    sample = initial_sample
    labeled = sample.index
    unlabeled = data.index[~data.index.isin(sample.index)]
    bg_size = len(data_bg)
    specific_bg = pandas.DataFrame({'x':data_bg.x,'y':data_bg.y,'label':np.repeat([False], repeats = bg_size)})
    general_bg = pandas.DataFrame({'x':data_bg.x,'y':data_bg.y,'label':np.repeat([True], repeats = bg_size)})
    for i in range(0,num_samples+1):  # was hard-coded to range(0,21); use the num_samples argument
        sample = data.loc[labeled]
        sample_specific = sample.append(specific_bg, ignore_index=True)
        sample_general = sample.append(general_bg, ignore_index=True)
        X_s = sample_specific[['x','y']]
        y_s = sample_specific.label
        X_g = sample_general[['x','y']]
        y_g = sample_general.label
        weights = np.repeat(1.0, len(sample))
        weights = np.append(weights, np.repeat(weight, repeats = bg_size))  # relies on the globally defined weight
        model_s.fit(X_s,y_s,weights)
        model_g.fit(X_g,y_g,weights)
        U = data.loc[unlabeled]
        entropy = soft_entropy([model_s, model_g], U)  # use the models passed in (was the globals lg_s, lg_g)
        xstar_index = U.index[np.argmax(entropy)]
        labeled = labeled.insert(0,xstar_index)
        unlabeled = unlabeled.drop(xstar_index)
    return sample
```

### Background data

```
data = pandas.read_pickle('../data/two_2dgaussians.pkl')
data = pandas.read_pickle('../data/two_2dgaussians_with_outliers.pkl')
data_bg = pandas.read_pickle('../data/two_2dgaussians_bg.pkl')

bg_size = len(data_bg)
specific_bg = pandas.DataFrame({'x':data_bg.x,'y':data_bg.y,'label':np.repeat([False], repeats = bg_size)})
general_bg = pandas.DataFrame({'x':data_bg.x,'y':data_bg.y,'label':np.repeat([True], repeats = bg_size)})

sample = data.sample(10)
sample_specific = sample.append(specific_bg, ignore_index=True)
sample_general = sample.append(general_bg, ignore_index=True)

df_plot = data_bg.copy()
df_plot['label'] = 'Unknown'

lg_s = LogisticRegression()
lg_g = LogisticRegression()
```

### Initial models

```
weight = 4.0/bg_size

X_s = sample_specific[['x','y']]
y_s = sample_specific.label
X_g = sample_general[['x','y']]
y_g = sample_general.label

weights = np.repeat(1.0, len(sample))
weights = np.append(weights, np.repeat(weight, repeats = bg_size))

lg_s.fit(X_s,y_s,weights)
lg_g.fit(X_g,y_g,weights)

plot_linear_classifier_commitee(sample,data,[lg_s, lg_g])+geom_point(df_plot, size=0.1)+ \
    guides(color=False)+\
    guides(shape=False)+\
    guides(fill=False)
```

### Active learning

```
initial_sample = sample
sample = active_learn_gs(20, initial_sample, data, data_bg, lg_s, lg_g)
plot_linear_classifier_commitee(sample,data,[lg_s, lg_g])+geom_point(df_plot, size=0.1)+ \
    guides(color=False)+\
    guides(shape=False)+\
    guides(fill=False)
```

### Query by Committee, skewed data

```
data = pandas.read_pickle('../data/two_2dgaussians_skewed.pkl')
data_bg = pandas.read_pickle('../data/two_2dgaussians_bg_skewed.pkl')

initial_sample = data[data.label==True].sample(1)
initial_sample = initial_sample.append(data.sample(9))

sample = active_learn_gs(20, initial_sample, data, data_bg, lg_s, lg_g)

df_plot = data_bg.copy()
df_plot['label'] = 'Unknown'

plot_linear_classifier_commitee(sample,data,[lg_s, lg_g])+geom_point(df_plot, size=0.1)+ \
    guides(color=False)+\
    guides(shape=False)+\
    guides(fill=False)
```

## Query by Committee, biased data (general vs. specific)

### Toy Dataset

```
data = pandas.read_pickle('../data/three_2dgaussians.pkl')
ggplot(data, aes(x='x', y='y', fill='label', shape='label'))+geom_point(size=3)

sample = data[data.y<5].sample(20)
labeled = sample.index
labeled_initial = labeled
unlabeled = data.index[~data.index.isin(sample.index)]
```

### Background data

```
data_bg = pandas.read_pickle('../data/three_2dgaussians_bg.pkl')

bg_size = len(data_bg)
specific_bg = pandas.DataFrame({'x':data_bg.x,'y':data_bg.y,'label':np.repeat([False], repeats = bg_size)})
general_bg = pandas.DataFrame({'x':data_bg.x,'y':data_bg.y,'label':np.repeat([True], repeats = bg_size)})

sample_specific = sample.append(specific_bg, ignore_index=True)
sample_general = sample.append(general_bg, ignore_index=True)
```

### Initial models

```
from sklearn.ensemble import RandomForestClassifier

rf_g = RandomForestClassifier(n_estimators=100, max_features=2, min_samples_leaf=8)
rf_s = RandomForestClassifier(n_estimators=100, max_features=2, min_samples_leaf=8)

weight = 1.0/bg_size

X_s = sample_specific[['x','y']]
y_s = sample_specific.label
X_g = sample_general[['x','y']]
y_g = sample_general.label

weights = np.repeat(1.0, len(sample))
weights = np.append(weights, np.repeat(weight, repeats = bg_size))

rf_s.fit(X_s,y_s,weights)
rf_g.fit(X_g,y_g,weights)

plot_classifier(sample, data, rf_s)
plot_classifier(sample, data, rf_g)

labeled = sample.index
unlabeled = data.index[~data.index.isin(sample.index)]

for i in range(0,21):
    sample = data.loc[labeled]
    sample_specific = sample.append(specific_bg, ignore_index=True)
    sample_general = sample.append(general_bg, ignore_index=True)
    X_s = sample_specific[['x','y']]
    y_s = sample_specific.label
    X_g = sample_general[['x','y']]
    y_g = sample_general.label
    weights = np.repeat(1.0, len(sample))
    weights = np.append(weights, np.repeat(weight, repeats = bg_size))
    rf_s.fit(X_s,y_s,weights)
    rf_g.fit(X_g,y_g,weights)
    U = data.loc[unlabeled]
    entropy = soft_entropy([rf_s, rf_g], U)
    xstar_index = U.index[np.argmax(entropy)]
    labeled = labeled.insert(0,xstar_index)
    unlabeled = unlabeled.drop(xstar_index)

selected = data.loc[sample.index.difference(labeled_initial)]
plot_classifier(sample, data, rf_s)+geom_point(selected,aes(x='x',y='y'), size=0.1)
plot_classifier(sample, data, rf_g)+geom_point(selected,aes(x='x',y='y'), size=0.1)

rf = RandomForestClassifier(n_estimators=100, max_features=2)
X = sample[['x','y']]
y = sample.label
rf.fit(X,y)
plot_classifier(sample, data, rf)+geom_point(selected,aes(x='x',y='y'), size=0.1)
```
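The committees above are scored with a soft-vote criterion (the members' averaged predicted probability, measured by its distance from 0.5). A common alternative in the query-by-committee literature is vote entropy over the members' hard predictions. The sketch below is an illustrative variant written against the same `U` DataFrame convention used in this notebook; the `vote_entropy` function is not part of the original lab code.

```
import numpy as np

def vote_entropy(committee, U):
    """Disagreement score based on the members' hard votes (higher = more disagreement)."""
    votes = np.stack([member.predict(U[['x', 'y']]) for member in committee])  # shape: (members, points)
    p_true = votes.mean(axis=0)          # fraction of members voting True for each point
    p = np.clip(np.stack([p_true, 1 - p_true]), 1e-12, 1.0)
    return -(p * np.log(p)).sum(axis=0)  # binary vote entropy per unlabeled point

# Usage: this could replace soft_entropy inside aktive_learn, e.g.
# entropy = vote_entropy(lg, U); xstar_index = U.index[np.argmax(entropy)]
```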
```
import sys
import numpy as np
import pysolr
from gensim.models import Doc2Vec
np.random.seed(42)
import smart_open
import pandas as pd
import gensim

# In case your sys.path does not contain the base repo, cd there.
print(sys.path)
%cd '~/ml-solr-course'

model_path = '2-ranking/lab4/airbnb_model'
query = 'Midtown sunny chateau'
number_of_initial_retrieved = 100

model = None # Load the Doc2Vec model from lab4
print(f'Model loaded')

# Instantiate the client
solr = None

# Search for the query
results = []
print(f'Number of results were {len(results)}')

# Use the same simple_preprocess from the last lab 4 to tokenize the query
tokenized_query = list()
tokenized_query

inferred_vector = model.infer_vector(tokenized_query)
print(inferred_vector)

df_results = pd.DataFrame(results)

similarities = []
for result in results:
    similarity = 0 # Find the similarity between the query and the result using gensim similarity_unseen_docs method
    similarities.append(similarity)

df_results["Similarity"] = pd.Series(similarities) # We store the similarities to order
```

Ok, what we have done is run the query against Solr and then compute the similarity between the query and the descriptions of the results. The idea behind the algorithm is to reorder the results based on the similarity score, not on BM25. Let's see which one is better.

```
df_results.head()

a = None # Sort the df_results by similarity column in descending order
a = a[:10].reset_index(drop=True)

print(f'Most similar document after reranking within retrieved results has description: \n\n{a["description"].iloc[0]}\nWith similarity: {a["Similarity"].iloc[0]}')
print(f'Most similar document before reranking within retrieved results has description: \n\n{df_results["description"].iloc[0]}\nWith similarity: {df_results["Similarity"].iloc[0]}')
print(f'Number of documents that surpass 0.5 similarity threshold: {len(a[a["Similarity"] >= 0.5])}')
```

It is remarkable how, using DBOW, the most similar result captured the need for midtown apartments that are chateaus. On the other hand, the traditional top result weighted "chateau" more heavily just because it is a rare word. It is not a perfect method, but a very good indication. A good idea is to place something like this **between** the raw results (thousands) and a learning-to-rank recommender (dozens), filtering the raw results down by similarity (hundreds) first. TensorFlow has open-sourced TensorFlow Recommenders, which is great to plug in as an algorithm **after** these results. But this alone would work just fine.
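For readers following along without the lab solutions, here is one way the TODO placeholders above might be filled in. It is a hedged sketch: the Solr URL and core name ('http://localhost:8983/solr/airbnb') and the result field names ('description') are assumptions about the course setup, and it presumes the lab 4 Doc2Vec model exposes `similarity_unseen_docs` as mentioned in the instructions above.

```
from gensim.utils import simple_preprocess

# Assumed Solr endpoint and core name; adjust to your own setup
solr = pysolr.Solr('http://localhost:8983/solr/airbnb', timeout=10)

# Load the Doc2Vec model trained in lab 4
model = Doc2Vec.load(model_path)

# Retrieve an initial candidate set ranked by BM25
results = list(solr.search(query, rows=number_of_initial_retrieved))

# Tokenize the query the same way the training documents were tokenized
tokenized_query = simple_preprocess(query)

# Score each candidate by Doc2Vec similarity between the query and its description
similarities = []
for result in results:
    tokenized_description = simple_preprocess(result.get('description', ''))
    similarities.append(model.similarity_unseen_docs(tokenized_query, tokenized_description))

# Rerank: highest Doc2Vec similarity first
df_results = pd.DataFrame(results)
df_results["Similarity"] = pd.Series(similarities)
a = df_results.sort_values("Similarity", ascending=False)[:10].reset_index(drop=True)
```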
# ADM Quantities in terms of BSSN Quantities

## Author: Zach Etienne
### Formatting improvements courtesy Brandon Clark

[comment]: <> (Abstract: TODO)

**Notebook Status:** <font color='orange'><b> Self-Validated </b></font>

**Validation Notes:** This tutorial notebook has been confirmed to be self-consistent with its corresponding NRPy+ module, as documented [below](#code_validation). **Additional validation tests may have been performed, but are as yet, undocumented. (TODO)**

### NRPy+ Source Code for this module: [ADM_in_terms_of_BSSN.py](../edit/BSSN/ADM_in_terms_of_BSSN.py)

## Introduction:
This tutorial notebook constructs all quantities in the [ADM formalism](https://en.wikipedia.org/wiki/ADM_formalism) (see also Chapter 2 in Baumgarte & Shapiro's book *Numerical Relativity*) in terms of quantities in our adopted (covariant, tensor-rescaled) BSSN formalism. That is to say, we will write the ADM quantities $\left\{\gamma_{ij},K_{ij},\alpha,\beta^i\right\}$ and their derivatives in terms of the BSSN quantities $\left\{\bar{\gamma}_{ij},\text{cf},\bar{A}_{ij},\text{tr}K,\alpha,\beta^i\right\}$ and their derivatives.

### A Note on Notation:

As is standard in NRPy+,

* Greek indices refer to four-dimensional quantities where the zeroth component indicates temporal (time) component.
* Latin indices refer to three-dimensional quantities. This is somewhat counterintuitive since Python always indexes its lists starting from 0. As a result, the zeroth component of three-dimensional quantities will necessarily indicate the first *spatial* direction.

As a corollary, any expressions in NRPy+ involving mixed Greek and Latin indices will need to offset one set of indices by one; a Latin index in a four-vector will be incremented and a Greek index in a three-vector will be decremented (however, the latter case does not occur in this tutorial notebook).

<a id='toc'></a>

# Table of Contents
$$\label{toc}$$

This notebook is organized as follows

1. [Step 1](#initializenrpy): Initialize core Python/NRPy+ modules
1. [Step 2](#threemetric): The ADM three-metric $\gamma_{ij}$ and its derivatives in terms of rescaled BSSN quantities
    1. [Step 2.a](#derivatives_e4phi): Derivatives of $e^{4\phi}$
    1. [Step 2.b](#derivatives_adm_3metric): Derivatives of the ADM three-metric: $\gamma_{ij,k}$ and $\gamma_{ij,kl}$
    1. [Step 2.c](#christoffel): Christoffel symbols $\Gamma^i_{jk}$ associated with the ADM 3-metric $\gamma_{ij}$
1. [Step 3](#extrinsiccurvature): The ADM extrinsic curvature $K_{ij}$ and its derivatives in terms of rescaled BSSN quantities
1. [Step 4](#code_validation): Code Validation against `BSSN.ADM_in_terms_of_BSSN` NRPy+ module
1. [Step 5](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file

<a id='initializenrpy'></a>

# Step 1: Initialize core Python/NRPy+ modules \[Back to [top](#toc)\]
$$\label{initializenrpy}$$

Let's start by importing all the needed modules from Python/NRPy+:

```
# Step 1.a: Import all needed modules from NRPy+
import NRPy_param_funcs as par   # NRPy+: parameter interface
import sympy as sp               # SymPy: The Python computer algebra package upon which NRPy+ depends
import indexedexp as ixp         # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
import reference_metric as rfm   # NRPy+: Reference metric support
import sys                       # Standard Python module for multiplatform OS-level functions

# Step 1.b: Set the coordinate system for the numerical grid
par.set_parval_from_str("reference_metric::CoordSystem","Spherical")

# Step 1.c: Given the chosen coordinate system, set up
#           corresponding reference metric and needed
#           reference metric quantities
# The following function call sets up the reference metric
#    and related quantities, including rescaling matrices ReDD,
#    ReU, and hatted quantities.
rfm.reference_metric()

# Step 1.d: Set spatial dimension (must be 3 for BSSN, as BSSN is
#           a 3+1-dimensional decomposition of the general
#           relativistic field equations)
DIM = 3

# Step 1.e: Import all basic (unrescaled) BSSN scalars & tensors
import BSSN.BSSN_quantities as Bq
Bq.BSSN_basic_tensors()
gammabarDD = Bq.gammabarDD
cf         = Bq.cf
AbarDD     = Bq.AbarDD
trK        = Bq.trK

Bq.gammabar__inverse_and_derivs()
gammabarDD_dD  = Bq.gammabarDD_dD
gammabarDD_dDD = Bq.gammabarDD_dDD

Bq.AbarUU_AbarUD_trAbar_AbarDD_dD()
AbarDD_dD = Bq.AbarDD_dD
```

<a id='threemetric'></a>

# Step 2: The ADM three-metric $\gamma_{ij}$ and its derivatives in terms of rescaled BSSN quantities. \[Back to [top](#toc)\]
$$\label{threemetric}$$

The ADM three-metric is written in terms of the covariant BSSN three-metric tensor as (Eqs. 2 and 3 of [Ruchlin *et al.*](https://arxiv.org/pdf/1712.07658.pdf)):
$$
\gamma_{ij} = \left(\frac{\gamma}{\bar{\gamma}}\right)^{1/3} \bar{\gamma}_{i j},
$$
where $\gamma=\det{\gamma_{ij}}$ and $\bar{\gamma}=\det{\bar{\gamma}_{ij}}$.

The "standard" BSSN conformal factor $\phi$ is given by (Eq. 3 of [Ruchlin *et al.*](https://arxiv.org/pdf/1712.07658.pdf)):

\begin{align}
\phi &= \frac{1}{12} \log\left(\frac{\gamma}{\bar{\gamma}}\right) \\
\implies e^{\phi} &= \left(\frac{\gamma}{\bar{\gamma}}\right)^{1/12} \\
\implies e^{4 \phi} &= \left(\frac{\gamma}{\bar{\gamma}}\right)^{1/3}
\end{align}

Thus the ADM three-metric may be written in terms of the BSSN three-metric and conformal factor $\phi$ as

$$
\gamma_{ij} = e^{4 \phi} \bar{\gamma}_{i j}.
$$

NRPy+'s implementation of BSSN allows for $\phi$ and two other alternative conformal factors to be defined:

\begin{align}
\chi &= e^{-4\phi} \\
W &= e^{-2\phi},
\end{align}

Thus if `"BSSN_quantities::EvolvedConformalFactor_cf"` is set to `"chi"`, then

\begin{align}
\gamma_{ij} &= \frac{1}{\chi} \bar{\gamma}_{i j} \\
&= \frac{1}{\text{cf}} \bar{\gamma}_{i j},
\end{align}

and if `"BSSN_quantities::EvolvedConformalFactor_cf"` is set to `"W"`, then

\begin{align}
\gamma_{ij} &= \frac{1}{W^2} \bar{\gamma}_{i j} \\
&= \frac{1}{\text{cf}^2} \bar{\gamma}_{i j}.
\end{align}

```
# Step 2: The ADM three-metric gammaDD and its
#         derivatives in terms of BSSN quantities.
gammaDD = ixp.zerorank2()

exp4phi = sp.sympify(0)
if par.parval_from_str("EvolvedConformalFactor_cf") == "phi":
    exp4phi = sp.exp(4*cf)
elif par.parval_from_str("EvolvedConformalFactor_cf") == "chi":
    exp4phi = (1 / cf)
elif par.parval_from_str("EvolvedConformalFactor_cf") == "W":
    exp4phi = (1 / cf**2)
else:
    print("Error EvolvedConformalFactor_cf type = \""+par.parval_from_str("EvolvedConformalFactor_cf")+"\" unknown.")
    sys.exit(1)

for i in range(DIM):
    for j in range(DIM):
        gammaDD[i][j] = exp4phi*gammabarDD[i][j]
```

<a id='derivatives_e4phi'></a>

## Step 2.a: Derivatives of $e^{4\phi}$ \[Back to [top](#toc)\]
$$\label{derivatives_e4phi}$$

To compute derivatives of $\gamma_{ij}$ in terms of BSSN variables and their derivatives, we will first need derivatives of $e^{4\phi}$ in terms of the conformal BSSN variable `cf`.

\begin{align}
\frac{\partial}{\partial x^i} e^{4\phi} &= 4 e^{4\phi} \phi_{,i} \\
\implies \frac{\partial}{\partial x^j} \frac{\partial}{\partial x^i} e^{4\phi} &= \frac{\partial}{\partial x^j} \left(4 e^{4\phi} \phi_{,i}\right) \\
&= 16 e^{4\phi} \phi_{,i} \phi_{,j} + 4 e^{4\phi} \phi_{,ij}
\end{align}

Thus computing first and second derivatives of $e^{4\phi}$ in terms of the BSSN quantity `cf` requires only that we evaluate $\phi_{,i}$ and $\phi_{,ij}$ in terms of $e^{4\phi}$ (computed above in terms of `cf`) and derivatives of `cf`:

If `"BSSN_quantities::EvolvedConformalFactor_cf"` is set to `"phi"`, then
\begin{align}
\phi_{,i} &= \text{cf}_{,i} \\
\phi_{,ij} &= \text{cf}_{,ij}
\end{align}

If `"BSSN_quantities::EvolvedConformalFactor_cf"` is set to `"chi"`, then
\begin{align}
\text{cf} = e^{-4\phi} \implies \text{cf}_{,i} &= -4 e^{-4\phi} \phi_{,i} \\
\implies \phi_{,i} &= -\frac{e^{4\phi}}{4} \text{cf}_{,i} \\
\implies \phi_{,ij} &= -e^{4\phi} \phi_{,j} \text{cf}_{,i} -\frac{e^{4\phi}}{4} \text{cf}_{,ij}\\
&= -e^{4\phi} \left(-\frac{e^{4\phi}}{4} \text{cf}_{,j}\right) \text{cf}_{,i} -\frac{e^{4\phi}}{4} \text{cf}_{,ij} \\
&= \frac{1}{4} \left[\left(e^{4\phi}\right)^2 \text{cf}_{,i} \text{cf}_{,j} -e^{4\phi} \text{cf}_{,ij}\right] \\
\end{align}

If `"BSSN_quantities::EvolvedConformalFactor_cf"` is set to `"W"`, then
\begin{align}
\text{cf} = e^{-2\phi} \implies \text{cf}_{,i} &= -2 e^{-2\phi} \phi_{,i} \\
\implies \phi_{,i} &= -\frac{e^{2\phi}}{2} \text{cf}_{,i} \\
\implies \phi_{,ij} &= -e^{2\phi} \phi_{,j} \text{cf}_{,i} -\frac{e^{2\phi}}{2} \text{cf}_{,ij}\\
&= -e^{2\phi} \left(-\frac{e^{2\phi}}{2} \text{cf}_{,j}\right) \text{cf}_{,i} -\frac{e^{2\phi}}{2} \text{cf}_{,ij} \\
&= \frac{1}{2} \left[e^{4\phi} \text{cf}_{,i} \text{cf}_{,j} -e^{2\phi} \text{cf}_{,ij}\right] \\
\end{align}

```
# Step 2.a: Derivatives of $e^{4\phi}$
phidD  = ixp.zerorank1()
phidDD = ixp.zerorank2()
cf_dD  = ixp.declarerank1("cf_dD")
cf_dDD = ixp.declarerank2("cf_dDD","sym01")
if par.parval_from_str("EvolvedConformalFactor_cf") == "phi":
    for i in range(DIM):
        phidD[i] = cf_dD[i]
        for j in range(DIM):
            phidDD[i][j] = cf_dDD[i][j]
elif par.parval_from_str("EvolvedConformalFactor_cf") == "chi":
    for i in range(DIM):
        phidD[i] = -sp.Rational(1,4)*exp4phi*cf_dD[i]
        for j in range(DIM):
            phidDD[i][j] = sp.Rational(1,4)*( exp4phi**2*cf_dD[i]*cf_dD[j] - exp4phi*cf_dDD[i][j] )
elif par.parval_from_str("EvolvedConformalFactor_cf") == "W":
    exp2phi = (1 / cf)
    for i in range(DIM):
        phidD[i] = -sp.Rational(1,2)*exp2phi*cf_dD[i]
        for j in range(DIM):
            phidDD[i][j] = sp.Rational(1,2)*( exp4phi*cf_dD[i]*cf_dD[j] - exp2phi*cf_dDD[i][j] )
else:
    print("Error EvolvedConformalFactor_cf type = \""+par.parval_from_str("EvolvedConformalFactor_cf")+"\" unknown.")
    sys.exit(1)

exp4phidD  = ixp.zerorank1()
exp4phidDD = ixp.zerorank2()
for i in range(DIM):
    exp4phidD[i] = 4*exp4phi*phidD[i]
    for j in range(DIM):
        exp4phidDD[i][j] = 16*exp4phi*phidD[i]*phidD[j] + 4*exp4phi*phidDD[i][j]
```

<a id='derivatives_adm_3metric'></a>

## Step 2.b: Derivatives of the ADM three-metric: $\gamma_{ij,k}$ and $\gamma_{ij,kl}$ \[Back to [top](#toc)\]
$$\label{derivatives_adm_3metric}$$

Recall the relation between the ADM three-metric $\gamma_{ij}$, the BSSN conformal three-metric $\bar{\gamma}_{i j}$, and the BSSN conformal factor $\phi$:

$$
\gamma_{ij} = e^{4 \phi} \bar{\gamma}_{i j}.
$$

Now that we have constructed derivatives of $e^{4 \phi}$ in terms of the chosen BSSN conformal factor `cf`, and the [BSSN.BSSN_quantities module](../edit/BSSN/BSSN_quantities.py) ([**tutorial**](Tutorial-BSSN_quantities.ipynb)) defines derivatives of $\bar{\gamma}_{ij}$ in terms of rescaled BSSN variables, derivatives of $\gamma_{ij}$ can be immediately constructed using the product rule:

\begin{align}
\gamma_{ij,k} &= \left(e^{4 \phi}\right)_{,k} \bar{\gamma}_{i j} + e^{4 \phi} \bar{\gamma}_{ij,k} \\
\gamma_{ij,kl} &= \left(e^{4 \phi}\right)_{,kl} \bar{\gamma}_{i j} + \left(e^{4 \phi}\right)_{,k} \bar{\gamma}_{i j,l} + \left(e^{4 \phi}\right)_{,l} \bar{\gamma}_{ij,k} + e^{4 \phi} \bar{\gamma}_{ij,kl}
\end{align}

```
# Step 2.b: Derivatives of gammaDD, the ADM three-metric
gammaDDdD  = ixp.zerorank3()
gammaDDdDD = ixp.zerorank4()

for i in range(DIM):
    for j in range(DIM):
        for k in range(DIM):
            gammaDDdD[i][j][k] = exp4phidD[k]*gammabarDD[i][j] + exp4phi*gammabarDD_dD[i][j][k]
            for l in range(DIM):
                gammaDDdDD[i][j][k][l] = exp4phidDD[k][l]*gammabarDD[i][j] + \
                                         exp4phidD[k]*gammabarDD_dD[i][j][l] + \
                                         exp4phidD[l]*gammabarDD_dD[i][j][k] + \
                                         exp4phi*gammabarDD_dDD[i][j][k][l]
```

<a id='christoffel'></a>

## Step 2.c: Christoffel symbols $\Gamma^i_{jk}$ associated with the ADM 3-metric $\gamma_{ij}$ \[Back to [top](#toc)\]
$$\label{christoffel}$$

The 3-metric analog to the definition of Christoffel symbol (Eq. 1.18) in Baumgarte & Shapiro's *Numerical Relativity* is given by
$$
\Gamma^i_{jk} = \frac{1}{2} \gamma^{il} \left(\gamma_{lj,k} + \gamma_{lk,j} - \gamma_{jk,l} \right),
$$
which we implement here:

```
# Step 2.c: 3-Christoffel symbols associated with ADM 3-metric gammaDD
# Step 2.c.i: First compute the inverse 3-metric gammaUU:
gammaUU, detgamma = ixp.symm_matrix_inverter3x3(gammaDD)

GammaUDD = ixp.zerorank3()
for i in range(DIM):
    for j in range(DIM):
        for k in range(DIM):
            for l in range(DIM):
                GammaUDD[i][j][k] += sp.Rational(1,2)*gammaUU[i][l]* \
                                     (gammaDDdD[l][j][k] + gammaDDdD[l][k][j] - gammaDDdD[j][k][l])
```

<a id='extrinsiccurvature'></a>

# Step 3: The ADM extrinsic curvature $K_{ij}$ and its derivatives in terms of rescaled BSSN quantities. \[Back to [top](#toc)\]
$$\label{extrinsiccurvature}$$

The ADM extrinsic curvature may be written in terms of the BSSN trace-free extrinsic curvature tensor $\bar{A}_{ij}$ and the trace of the ADM extrinsic curvature $K$:

\begin{align}
K_{ij} &= \left(\frac{\gamma}{\bar{\gamma}}\right)^{1/3} \bar{A}_{ij} + \frac{1}{3} \gamma_{ij} K \\
&= e^{4\phi} \bar{A}_{ij} + \frac{1}{3} \gamma_{ij} K \\
\end{align}

We only compute first spatial derivatives of $K_{ij}$, as higher-derivatives are generally not needed:
$$
K_{ij,k} = \left(e^{4\phi}\right)_{,k} \bar{A}_{ij} + e^{4\phi} \bar{A}_{ij,k} + \frac{1}{3} \left(\gamma_{ij,k} K + \gamma_{ij} K_{,k}\right)
$$
which is expressed in terms of quantities already defined.

```
# Step 3: Define ADM extrinsic curvature KDD and
#         its first spatial derivatives KDDdD
#         in terms of BSSN quantities
KDD = ixp.zerorank2()

for i in range(DIM):
    for j in range(DIM):
        KDD[i][j] = exp4phi*AbarDD[i][j] + sp.Rational(1,3)*gammaDD[i][j]*trK

KDDdD = ixp.zerorank3()
trK_dD = ixp.declarerank1("trK_dD")
for i in range(DIM):
    for j in range(DIM):
        for k in range(DIM):
            KDDdD[i][j][k] = exp4phidD[k]*AbarDD[i][j] + exp4phi*AbarDD_dD[i][j][k] + \
                             sp.Rational(1,3)*(gammaDDdD[i][j][k]*trK + gammaDD[i][j]*trK_dD[k])
```

<a id='code_validation'></a>

# Step 4: Code Validation against `BSSN.ADM_in_terms_of_BSSN` NRPy+ module \[Back to [top](#toc)\]
$$\label{code_validation}$$

Here, as a code validation check, we verify agreement in the SymPy expressions between
1. this tutorial and
2. the NRPy+ [BSSN.ADM_in_terms_of_BSSN](../edit/BSSN/ADM_in_terms_of_BSSN.py) module.

```
all_passed=True

def comp_func(expr1,expr2,basename,prefixname2="Bq."):
    global all_passed  # needed so that a failed comparison is actually reported below
    if str(expr1-expr2)!="0":
        print(basename+" - "+prefixname2+basename+" = "+ str(expr1-expr2))
        all_passed=False

def gfnm(basename,idx1,idx2=None,idx3=None,idx4=None):
    if idx2 is None:
        return basename+"["+str(idx1)+"]"
    if idx3 is None:
        return basename+"["+str(idx1)+"]["+str(idx2)+"]"
    if idx4 is None:
        return basename+"["+str(idx1)+"]["+str(idx2)+"]["+str(idx3)+"]"
    return basename+"["+str(idx1)+"]["+str(idx2)+"]["+str(idx3)+"]["+str(idx4)+"]"

expr_list = []
exprcheck_list = []
namecheck_list = []

import BSSN.ADM_in_terms_of_BSSN as AB
AB.ADM_in_terms_of_BSSN()

namecheck_list.extend(["detgamma"])
exprcheck_list.extend([AB.detgamma])
expr_list.extend([detgamma])

for i in range(DIM):
    for j in range(DIM):
        namecheck_list.extend([gfnm("gammaDD",i,j),gfnm("gammaUU",i,j),gfnm("KDD",i,j)])
        exprcheck_list.extend([AB.gammaDD[i][j],AB.gammaUU[i][j],AB.KDD[i][j]])
        expr_list.extend([gammaDD[i][j],gammaUU[i][j],KDD[i][j]])
        for k in range(DIM):
            namecheck_list.extend([gfnm("gammaDDdD",i,j,k),gfnm("GammaUDD",i,j,k),gfnm("KDDdD",i,j,k)])
            exprcheck_list.extend([AB.gammaDDdD[i][j][k],AB.GammaUDD[i][j][k],AB.KDDdD[i][j][k]])
            expr_list.extend([gammaDDdD[i][j][k],GammaUDD[i][j][k],KDDdD[i][j][k]])
            for l in range(DIM):
                namecheck_list.extend([gfnm("gammaDDdDD",i,j,k,l)])
                exprcheck_list.extend([AB.gammaDDdDD[i][j][k][l]])
                expr_list.extend([gammaDDdDD[i][j][k][l]])

for i in range(len(expr_list)):
    comp_func(expr_list[i],exprcheck_list[i],namecheck_list[i])

if all_passed:
    print("ALL TESTS PASSED!")
else:
    print("ERROR. ONE OR MORE TESTS FAILED")
    sys.exit(1)
```

<a id='latex_pdf_output'></a>

# Step 5: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
$$\label{latex_pdf_output}$$

The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename [Tutorial-ADM_in_terms_of_BSSN.pdf](Tutorial-ADM_in_terms_of_BSSN.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)

```
import cmdline_helper as cmd    # NRPy+: Multi-platform Python command-line interface
cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-ADM_in_terms_of_BSSN")
```
github_jupyter
# Step 1.a: Import all needed modules from NRPy+ import NRPy_param_funcs as par # NRPy+: parameter interface import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support import reference_metric as rfm # NRPy+: Reference metric support import sys # Standard Python module for multiplatform OS-level functions # Step 1.b: Set the coordinate system for the numerical grid par.set_parval_from_str("reference_metric::CoordSystem","Spherical") # Step 1.c: Given the chosen coordinate system, set up # corresponding reference metric and needed # reference metric quantities # The following function call sets up the reference metric # and related quantities, including rescaling matrices ReDD, # ReU, and hatted quantities. rfm.reference_metric() # Step 1.d: Set spatial dimension (must be 3 for BSSN, as BSSN is # a 3+1-dimensional decomposition of the general # relativistic field equations) DIM = 3 # Step 1.e: Import all basic (unrescaled) BSSN scalars & tensors import BSSN.BSSN_quantities as Bq Bq.BSSN_basic_tensors() gammabarDD = Bq.gammabarDD cf = Bq.cf AbarDD = Bq.AbarDD trK = Bq.trK Bq.gammabar__inverse_and_derivs() gammabarDD_dD = Bq.gammabarDD_dD gammabarDD_dDD = Bq.gammabarDD_dDD Bq.AbarUU_AbarUD_trAbar_AbarDD_dD() AbarDD_dD = Bq.AbarDD_dD # Step 2: The ADM three-metric gammaDD and its # derivatives in terms of BSSN quantities. gammaDD = ixp.zerorank2() exp4phi = sp.sympify(0) if par.parval_from_str("EvolvedConformalFactor_cf") == "phi": exp4phi = sp.exp(4*cf) elif par.parval_from_str("EvolvedConformalFactor_cf") == "chi": exp4phi = (1 / cf) elif par.parval_from_str("EvolvedConformalFactor_cf") == "W": exp4phi = (1 / cf**2) else: print("Error EvolvedConformalFactor_cf type = \""+par.parval_from_str("EvolvedConformalFactor_cf")+"\" unknown.") sys.exit(1) for i in range(DIM): for j in range(DIM): gammaDD[i][j] = exp4phi*gammabarDD[i][j] # Step 2.a: Derivatives of $e^{4\phi}$ phidD = ixp.zerorank1() phidDD = ixp.zerorank2() cf_dD = ixp.declarerank1("cf_dD") cf_dDD = ixp.declarerank2("cf_dDD","sym01") if par.parval_from_str("EvolvedConformalFactor_cf") == "phi": for i in range(DIM): phidD[i] = cf_dD[i] for j in range(DIM): phidDD[i][j] = cf_dDD[i][j] elif par.parval_from_str("EvolvedConformalFactor_cf") == "chi": for i in range(DIM): phidD[i] = -sp.Rational(1,4)*exp4phi*cf_dD[i] for j in range(DIM): phidDD[i][j] = sp.Rational(1,4)*( exp4phi**2*cf_dD[i]*cf_dD[j] - exp4phi*cf_dDD[i][j] ) elif par.parval_from_str("EvolvedConformalFactor_cf") == "W": exp2phi = (1 / cf) for i in range(DIM): phidD[i] = -sp.Rational(1,2)*exp2phi*cf_dD[i] for j in range(DIM): phidDD[i][j] = sp.Rational(1,2)*( exp4phi*cf_dD[i]*cf_dD[j] - exp2phi*cf_dDD[i][j] ) else: print("Error EvolvedConformalFactor_cf type = \""+par.parval_from_str("EvolvedConformalFactor_cf")+"\" unknown.") sys.exit(1) exp4phidD = ixp.zerorank1() exp4phidDD = ixp.zerorank2() for i in range(DIM): exp4phidD[i] = 4*exp4phi*phidD[i] for j in range(DIM): exp4phidDD[i][j] = 16*exp4phi*phidD[i]*phidD[j] + 4*exp4phi*phidDD[i][j] # Step 2.b: Derivatives of gammaDD, the ADM three-metric gammaDDdD = ixp.zerorank3() gammaDDdDD = ixp.zerorank4() for i in range(DIM): for j in range(DIM): for k in range(DIM): gammaDDdD[i][j][k] = exp4phidD[k]*gammabarDD[i][j] + exp4phi*gammabarDD_dD[i][j][k] for l in range(DIM): gammaDDdDD[i][j][k][l] = exp4phidDD[k][l]*gammabarDD[i][j] + \ exp4phidD[k]*gammabarDD_dD[i][j][l] + \ 
exp4phidD[l]*gammabarDD_dD[i][j][k] + \ exp4phi*gammabarDD_dDD[i][j][k][l] # Step 2.c: 3-Christoffel symbols associated with ADM 3-metric gammaDD # Step 2.c.i: First compute the inverse 3-metric gammaUU: gammaUU, detgamma = ixp.symm_matrix_inverter3x3(gammaDD) GammaUDD = ixp.zerorank3() for i in range(DIM): for j in range(DIM): for k in range(DIM): for l in range(DIM): GammaUDD[i][j][k] += sp.Rational(1,2)*gammaUU[i][l]* \ (gammaDDdD[l][j][k] + gammaDDdD[l][k][j] - gammaDDdD[j][k][l]) # Step 3: Define ADM extrinsic curvature KDD and # its first spatial derivatives KDDdD # in terms of BSSN quantities KDD = ixp.zerorank2() for i in range(DIM): for j in range(DIM): KDD[i][j] = exp4phi*AbarDD[i][j] + sp.Rational(1,3)*gammaDD[i][j]*trK KDDdD = ixp.zerorank3() trK_dD = ixp.declarerank1("trK_dD") for i in range(DIM): for j in range(DIM): for k in range(DIM): KDDdD[i][j][k] = exp4phidD[k]*AbarDD[i][j] + exp4phi*AbarDD_dD[i][j][k] + \ sp.Rational(1,3)*(gammaDDdD[i][j][k]*trK + gammaDD[i][j]*trK_dD[k]) all_passed=True def comp_func(expr1,expr2,basename,prefixname2="Bq."): if str(expr1-expr2)!="0": print(basename+" - "+prefixname2+basename+" = "+ str(expr1-expr2)) all_passed=False def gfnm(basename,idx1,idx2=None,idx3=None,idx4=None): if idx2 is None: return basename+"["+str(idx1)+"]" if idx3 is None: return basename+"["+str(idx1)+"]["+str(idx2)+"]" if idx4 is None: return basename+"["+str(idx1)+"]["+str(idx2)+"]["+str(idx3)+"]" return basename+"["+str(idx1)+"]["+str(idx2)+"]["+str(idx3)+"]["+str(idx4)+"]" expr_list = [] exprcheck_list = [] namecheck_list = [] import BSSN.ADM_in_terms_of_BSSN as AB AB.ADM_in_terms_of_BSSN() namecheck_list.extend(["detgamma"]) exprcheck_list.extend([AB.detgamma]) expr_list.extend([detgamma]) for i in range(DIM): for j in range(DIM): namecheck_list.extend([gfnm("gammaDD",i,j),gfnm("gammaUU",i,j),gfnm("KDD",i,j)]) exprcheck_list.extend([AB.gammaDD[i][j],AB.gammaUU[i][j],AB.KDD[i][j]]) expr_list.extend([gammaDD[i][j],gammaUU[i][j],KDD[i][j]]) for k in range(DIM): namecheck_list.extend([gfnm("gammaDDdD",i,j,k),gfnm("GammaUDD",i,j,k),gfnm("KDDdD",i,j,k)]) exprcheck_list.extend([AB.gammaDDdD[i][j][k],AB.GammaUDD[i][j][k],AB.KDDdD[i][j][k]]) expr_list.extend([gammaDDdD[i][j][k],GammaUDD[i][j][k],KDDdD[i][j][k]]) for l in range(DIM): namecheck_list.extend([gfnm("gammaDDdDD",i,j,k,l)]) exprcheck_list.extend([AB.gammaDDdDD[i][j][k][l]]) expr_list.extend([gammaDDdDD[i][j][k][l]]) for i in range(len(expr_list)): comp_func(expr_list[i],exprcheck_list[i],namecheck_list[i]) if all_passed: print("ALL TESTS PASSED!") else: print("ERROR. ONE OR MORE TESTS FAILED") sys.exit(1) import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-ADM_in_terms_of_BSSN")
Importing and formatting data Import MNIST dataset of 60,000 training images and 10,000 testing images ``` import tensorflow as tf physical_devices = tf.config.list_physical_devices('GPU') tf.config.experimental.set_memory_growth(physical_devices[0], True) # For drawing the MNIST digits as well as plots to help us evaluate performance we # will make extensive use of matplotlib from matplotlib import pyplot as plt # All of the Keras datasets are in keras.datasets from tensorflow.keras.datasets import mnist # Keras has already split the data into training and test data (training_images, training_labels), (test_images, test_labels) = mnist.load_data() # training_images is a list of 60,000 2D lists. # Each 2D list is 28 by 28, the size of the MNIST pixel data. # Each item in the 2D array is an integer from 0 to 255 representing its grayscale # intensity where 0 means white, 255 means black. print(len(training_images), training_images[0].shape) # Each training label is a value between 0 and 9 indicating which digit is represented. # The first item in the training data is a 5 print(len(training_labels), training_labels[0]) ``` Visualize the first 100 images in the dataset ``` # Let's visualize the first 100 images from the dataset for i in range(100): ax = plt.subplot(10, 10, i+1) ax.axis('off') plt.imshow(training_images[i], cmap='Greys') ``` Fixing the data format: using `numpy.reshape` and `keras.utils.to_categorical` ``` from tensorflow.keras.utils import to_categorical # Preparing the dataset # Setup train and test splits (training_images, training_labels), (test_images, test_labels) = mnist.load_data() # 28 x 28 = 784, because that's the dimensions of the MNIST data. image_size = 784 # Reshaping the training_images and test_images to lists of vectors with length 784 # instead of lists of 2D arrays. Same for the test_images training_data = training_images.reshape(training_images.shape[0], image_size) test_data = test_images.reshape(test_images.shape[0], image_size) # [ # [1,2,3] # [4,5,6] # ] # => [1,2,3,4,5,6] # Just showing the changes... print("training data: ", training_images.shape, " ==> ", training_data.shape) print("test data: ", test_images.shape, " ==> ", test_data.shape) # Create 1-hot encoded vectors using to_categorical num_classes = 10 # Because it's how many digits we have (0-9) # to_categorical takes a list of integers (our labels) and makes them into 1-hot vectors training_labels = to_categorical(training_labels, num_classes) test_labels = to_categorical(test_labels, num_classes) # Recall that before this transformation, training_labels[0] was the value 5. Look now: print(training_labels[0]) from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense # Using Leaky ReLU is slightly different in Keras, which can be annoying. # Additionally, Keras allows us to choose any slope we want for the "leaky" part # rather than being statically 0.01 as in the above two functions. from tensorflow.keras.layers import LeakyReLU # Sequential models are a series of layers applied linearly. medium_model = Sequential() # The first layer must specify its input_shape. # This is how the first two layers are added, the input layer and the first hidden layer. 
medium_model.add(Dense(units=30, input_shape=(image_size,))) medium_model.add(LeakyReLU(alpha=.1)) medium_model.add(Dense(units=30)) medium_model.add(LeakyReLU(alpha=.09)) medium_model.add(Dense(units=30)) medium_model.add(LeakyReLU(alpha=.08)) medium_model.add(Dense(units=30)) medium_model.add(LeakyReLU(alpha=.07)) medium_model.add(Dense(units=30)) medium_model.add(LeakyReLU(alpha=.06)) medium_model.add(Dense(units=30)) medium_model.add(LeakyReLU(alpha=.05)) medium_model.add(Dense(units=30)) medium_model.add(LeakyReLU(alpha=.04)) medium_model.add(Dense(units=30)) medium_model.add(LeakyReLU(alpha=.03)) medium_model.add(Dense(units=30)) medium_model.add(LeakyReLU(alpha=.02)) medium_model.add(Dense(units=30)) medium_model.add(LeakyReLU(alpha=.01)) # This is how the output layer gets added; the 'softmax' activation function ensures # that the sum of the values in the output nodes is 1. Softmax is very # common in classification networks. medium_model.add(Dense(units=num_classes, activation='softmax')) # This function prints a useful text summary of the network medium_model.summary() ``` Compiling and training the model ``` # nadam is the Adam optimizer with Nesterov momentum. # kullback_leibler_divergence measures how the predicted distribution diverges from the target # distribution; with one-hot labels it coincides with categorical cross-entropy. # accuracy is the percent of predictions that were correct. medium_model.compile(optimizer="nadam", loss='kullback_leibler_divergence', metrics=['accuracy']) # The network will make predictions for 128 flattened images per weight update. # It will make a prediction on each item in the training set 30 times (30 epochs) # And 10% of the data will be used as validation data. history = medium_model.fit(training_data, training_labels, batch_size=128, epochs=30, verbose=True, validation_split=.1) ``` Evaluating our model ``` loss, accuracy = medium_model.evaluate(test_data, test_labels, verbose=True) plt.plot(history.history['accuracy']) plt.plot(history.history['val_accuracy']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['training', 'validation'], loc='best') plt.show() plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['training', 'validation'], loc='best') plt.show() print(f'Test loss: {loss:.3}') print(f'Test accuracy: {accuracy:.3}') print(history.history['accuracy']) print(history.history['val_accuracy']) ``` Look at specific results ``` from numpy import argmax # Predicting once, then we can use these repeatedly in the next cell without recomputing the predictions. predictions = medium_model.predict(test_data) # For pagination & style in second cell page = 0 fontdict = {'color': 'black'} # Repeatedly running this cell will page through the predictions for i in range(16): ax = plt.subplot(4, 4, i+1) ax.axis('off') plt.imshow(test_images[i + page], cmap='Greys') prediction = argmax(predictions[i + page]) true_value = argmax(test_labels[i + page]) fontdict['color'] = 'black' if prediction == true_value else 'red' plt.title("{}, {}".format(prediction, true_value), fontdict=fontdict) page += 16 plt.tight_layout() plt.show() ```
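As a further diagnostic, the predictions already computed above can be summarized per class. The following sketch is not part of the original notebook; it assumes scikit-learn is available and reuses `predictions` and the one-hot `test_labels` from the cells above.

```python
import numpy as np
from sklearn.metrics import confusion_matrix

# back from one-hot vectors / softmax outputs to integer class labels
y_true = np.argmax(test_labels, axis=1)
y_pred = np.argmax(predictions, axis=1)

cm = confusion_matrix(y_true, y_pred)
print(cm)

# per-class accuracy (recall): correct predictions for each digit divided by its support
per_class_acc = cm.diagonal() / cm.sum(axis=1)
for digit, acc in enumerate(per_class_acc):
    print(f"digit {digit}: {acc:.3f}")
```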
<table> <tr> <td style="background-color:#ffffff;"><a href="https://qsoftware.lu.lv/index.php/qworld/" target="_blank"><img src="..\images\qworld.jpg" width="70%" align="left"></a></td> <td style="background-color:#ffffff;" width="*"></td> <td style="background-color:#ffffff;vertical-align:text-top;"><a href="https://qsoftware.lu.lv" target="_blank"><img src="..\images\logo.jpg" width="25%" align="right"></a></td> </tr> <tr><td colspan="3" align="right" style="color:#777777;background-color:#ffffff;font-size:12px;"> prepared by Maksim Dimitrijev </td></tr> <tr><td colspan="3" align="right" style="color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;"> This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. </td></tr> </table> $ \newcommand{\bra}[1]{\langle #1|} $ $ \newcommand{\ket}[1]{|#1\rangle} $ $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $ $ \newcommand{\dot}[2]{ #1 \cdot #2} $ $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $ $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $ $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $ $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $ $ \newcommand{\mypar}[1]{\left( #1 \right)} $ $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $ $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $ $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $ $ \newcommand{\onehalf}{\frac{1}{2}} $ $ \newcommand{\donehalf}{\dfrac{1}{2}} $ $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $ $ \newcommand{\vzero}{\myvector{1\\0}} $ $ \newcommand{\vone}{\myvector{0\\1}} $ $ \newcommand{\vhadamardzero}{\myvector{ \sqrttwo \\ \sqrttwo } } $ $ \newcommand{\vhadamardone}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $ $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $ $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $ $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $ $ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $ $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $ $ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $ <h2> <font color="blue"> Solutions for </font>Grover's Search: Implementation</h2> <a id="task1"></a> <h3>Task 1</h3> Implement the query operation for $n=2$ ($N=4$). Define a function which marks any one of asked elements. As a result you need to define the following function: <i>query(circuit,quantum_reg,number)</i>, where: <ul> <li><i>circuit</i> allows to pass the quantum circuit;</li> <li><i>quantum_reg</i> allows to pass the quantum register;</li> <li><i>number</i> is the number of marked element, between 0 and 3, where 0 corresponds to 00 and 3 corresponds to 11 (like binary numbers :) ).</li> </ul> <h3>Solution</h3> ``` #number - marked element, between 0 and 3. 
def query(circuit,quantum_reg,number): # prepare ancilla qubit circuit.x(quantum_reg[2]) circuit.h(quantum_reg[2]) if(number%2 == 0): circuit.x(quantum_reg[0]) if(number < 2): circuit.x(quantum_reg[1]) circuit.ccx(quantum_reg[0],quantum_reg[1],quantum_reg[2]) if(number < 2): circuit.x(quantum_reg[1]) if(number%2 == 0): circuit.x(quantum_reg[0]) # put ancilla qubit back into state |0> circuit.h(quantum_reg[2]) circuit.x(quantum_reg[2]) ``` You can play around with the following code to see that your function is implementing the query operation. How to use this to mark 2 elements? ``` from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer qreg3 = QuantumRegister(3) creg3 = ClassicalRegister(3) mycircuit3 = QuantumCircuit(qreg3,creg3) #Any value between 0 and 3. query(mycircuit3,qreg3,1) #Uncomment the next line to mark additional element. #query(mycircuit3,qreg3,2) job = execute(mycircuit3,Aer.get_backend('unitary_simulator')) u=job.result().get_unitary(mycircuit3,decimals=3) for i in range(len(u)): s="" for j in range(len(u)): val = str(u[i][j].real) while(len(val)<5): val = " "+val s = s + val print(s) ``` <a id="task2"></a> <h3>Task 2 (Optional, challenging)</h3> Implement the query operation for $n=3$ ($N=8$). To implements this operation you will need 5 qubits (1 additional qubit to implement controlled operations + ancilla). Use the qubit 3 as additional qubit and qubit 4 as ancilla. <h3>Solution</h3> ``` #number - marked element, between 0 and 7. def big_query(circuit,quantum_reg,number): # prepare ancilla qubit circuit.x(quantum_reg[4]) circuit.h(quantum_reg[4]) if(number%2 == 0): circuit.x(quantum_reg[0]) if(number%4 < 2): circuit.x(quantum_reg[1]) if(number < 4): circuit.x(quantum_reg[2]) circuit.ccx(quantum_reg[0],quantum_reg[1],quantum_reg[3]) circuit.ccx(quantum_reg[2],quantum_reg[3],quantum_reg[4]) circuit.ccx(quantum_reg[0],quantum_reg[1],quantum_reg[3]) if(number < 4): circuit.x(quantum_reg[2]) if(number%4 < 2): circuit.x(quantum_reg[1]) if(number%2 == 0): circuit.x(quantum_reg[0]) # put ancilla qubit back into state |0> circuit.h(quantum_reg[4]) circuit.x(quantum_reg[4]) ``` You can play around with the following code to see that your function is implementing the query operation. ``` from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer big_qreg = QuantumRegister(5) big_creg = ClassicalRegister(5) big_mycircuit = QuantumCircuit(big_qreg,big_creg) #Any value between 0 and 7. big_query(big_mycircuit,big_qreg,5) job = execute(big_mycircuit,Aer.get_backend('unitary_simulator')) u=job.result().get_unitary(big_mycircuit,decimals=3) # print top-left 8x8 entries of the matrix. for i in range(8): s="" for j in range(8): val = str(u[i][j].real) while(len(val)<5): val = " "+val s = s + val print(s) ``` <a id="task3"></a> <h3>Task 3</h3> Implement the inversion operation for 4 elements. In the implementation the ancilla qubit will be qubit 2, while qubits for control are 0 and 1. As a result you should obtain the following values in the top-left $4 \times 4$ entries: $\mymatrix{cccc}{-0.5 & 0.5 & 0.5 & 0.5 \\ 0.5 & -0.5 & 0.5 & 0.5 \\ 0.5 & 0.5 & -0.5 & 0.5 \\ 0.5 & 0.5 & 0.5 & -0.5}$. 
<h3>Solution</h3> ``` def inversion(circuit,quantum_reg): #step 1 circuit.x(quantum_reg[2]) circuit.h(quantum_reg[2]) #step 2 circuit.h(quantum_reg[1]) circuit.h(quantum_reg[0]) circuit.x(quantum_reg[1]) circuit.x(quantum_reg[0]) #step 3 circuit.ccx(quantum_reg[1],quantum_reg[0],quantum_reg[2]) #step 4 circuit.x(quantum_reg[2]) #step 5 circuit.x(quantum_reg[1]) circuit.x(quantum_reg[0]) circuit.h(quantum_reg[1]) circuit.h(quantum_reg[0]) #step 6 circuit.h(quantum_reg[2]) circuit.x(quantum_reg[2]) ``` Below you can check the matrix of your inversion operator and how does the circuit look like. We are interested in top-left $4 \times 4$ part of the matrix, the remaining parts are because we used ancilla qubit. ``` from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer qreg4 = QuantumRegister(3) creg4 = ClassicalRegister(3) mycircuit4 = QuantumCircuit(qreg4,creg4) inversion(mycircuit4,qreg4) job = execute(mycircuit4,Aer.get_backend('unitary_simulator')) u=job.result().get_unitary(mycircuit4,decimals=3) for i in range(len(u)): s="" for j in range(len(u)): val = str(u[i][j].real) while(len(val)<5): val = " "+val s = s + val print(s) mycircuit4.draw(output='mpl') ``` <a id="task4"></a> <h3>Task 4 (Optional, challenging)</h3> Implement the inversion operation for $n=3$ ($N=8$). This time you will need 5 qubits - 3 for the operation, 1 for ancilla, and one more qubit to ensure the operation of 3 qubits controlling the other qubit. In the implementation the ancilla qubit will be qubit 4, while qubits for control are 0, 1 and 2; qubit 3 is used to ensure this multiple control operation. As a result you should obtain the following values in the top-left $8 \times 8$ entries: $\mymatrix{cccccccc}{-0.75 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 \\ 0.25 & -0.75 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 \\ 0.25 & 0.25 & -0.75 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 \\ 0.25 & 0.25 & 0.25 & -0.75 & 0.25 & 0.25 & 0.25 & 0.25 \\ 0.25 & 0.25 & 0.25 & 0.25 & -0.75 & 0.25 & 0.25 & 0.25 \\ 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & -0.75 & 0.25 & 0.25 \\ 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & -0.75 & 0.25 \\ 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & -0.75}$. <h3>Solution</h3> ``` def big_inversion(circuit,quantum_reg): circuit.x(quantum_reg[4]) circuit.h(quantum_reg[4]) for i in range(3): circuit.h(quantum_reg[i]) circuit.x(quantum_reg[i]) circuit.ccx(quantum_reg[1],quantum_reg[0],quantum_reg[3]) circuit.ccx(quantum_reg[2],quantum_reg[3],quantum_reg[4]) circuit.ccx(quantum_reg[1],quantum_reg[0],quantum_reg[3]) circuit.x(quantum_reg[4]) for i in range(3): circuit.x(quantum_reg[i]) circuit.h(quantum_reg[i]) circuit.h(quantum_reg[4]) circuit.x(quantum_reg[4]) ``` Below you can check the matrix of your inversion operator. We are interested in the top-left $8 \times 8$ part of the matrix, the remaining parts are because of additional qubits. ``` from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer big_qreg2 = QuantumRegister(5) big_creg2 = ClassicalRegister(5) big_mycircuit2 = QuantumCircuit(big_qreg2,big_creg2) big_inversion(big_mycircuit2,big_qreg2) job = execute(big_mycircuit2,Aer.get_backend('unitary_simulator')) u=job.result().get_unitary(big_mycircuit2,decimals=3) for i in range(8): s="" for j in range(8): val = str(u[i][j].real) while(len(val)<6): val = " "+val s = s + val print(s) ```
<h1>Table of Contents<span class="tocSkip"></span></h1> <div class="toc"><ul class="toc-item"></ul></div> ``` import requests import json import pandas as pd API_ENDPOINT='https://api.wit.ai/entities/chemical_substance/keywords?v=20200624' wit_access_token='FE5AVTCKI4WL7X2S4RCZPS4L7D53S5QP' def createKeyword(): #data_excel=pd.read_excel('wit-training-model.xlsx') #data_table=pd.DataFrame(dataexcel,columns=['Intent','Entity','Keywords','Synonym_1','Synonym_2','Synonym_3','Synonym_4','PubChem_CID']) #241-313 #choose one entity and add n keywords with n synonyms per keyword created entities=['chemical_substance'] keyword=['Hydrochloric Acid'] synonyms=[['Acid, Hydrochloric','Acid, Muriatic','Hydrogen Chloride','Chloride, Hydrogen','Hydrochloric Acid']] #roles=[['hazard_good_spec'],['storage_substance','mixture_compound'],['chemical_characteristic'],['fluid_state']] for i in range(len(keyword)): #usando plantilla de excel #data_roles=data_table.iloc[i]['roles'] #data_keyword=data_table.iloc[i]['Keywords'] #data_synonyms=data_table.iloc[i]['Synonym_1'] #data_entity=data_table.iloc[i]['Entity'] #API_ENDPOINT='https://api.wit.ai/entities/'+data_entity+'/keywords?v=20200624' data_roles={'roles':roles[i]} data_keyword={'keyword':keyword[i]} data_synonyms={'synonyms':synonyms[i]} data={**data_roles,**data_keyword,**data_synonyms} #dat={'name':'chemical_substance','roles':['storage_substance','mixture_compound']} #dat={'name':'get_storage_compatibility','roles':['class_of_substance']} #print(newintent) headers = {'authorization': 'Bearer ' + wit_access_token,'Content-Type': 'application/json'} resp=requests.post(API_ENDPOINT,headers=headers,json=data) data=json.loads(resp.content) print(data) return data if __name__ == '__main__': #intent=input() textt=createKeyword() if textt is None: print("\n Result: Intent Saved on Wit.ai Success (200){}") import requests import json entity=['chemical_substance','fluid_phase'] API_ENDPOINT='https://api.wit.ai/entities/'+entity+'/keywords?v=20200624' #API_ENDPOINT='https://api.wit.ai/entities/chemical_substance/keywords?v=20200624' wit_access_token='FE5AVTCKI4WL7X2S4RCZPS4L7D53S5QP' def createKeyword(): #241-313 #choose one entity and add n keywords with n synonyms per keyword created entities=['chemical_substance'] keyword=['Hydrochloric Acid'] synonyms=[['Acid, Hydrochloric','Acid, Muriatic','Hydrogen Chloride','Chloride, Hydrogen','Hydrochloric Acid']] #roles=[['hazard_good_spec'],['storage_substance','mixture_compound'],['chemical_characteristic'],['fluid_state']] for i in range(len(keyword)): d1={'keyword':keyword[i]} d2={'synonyms':synonyms[i]} dat={**d1,**d2} #dat={'name':'chemical_substance','roles':['storage_substance','mixture_compound']} #dat={'name':'get_storage_compatibility','roles':['class_of_substance']} #print(newintent) headers = {'authorization': 'Bearer ' + wit_access_token,'Content-Type': 'application/json'} resp=requests.post(API_ENDPOINT,headers=headers,json=dat) data=json.loads(resp.content) print(data) return data if __name__ == '__main__': #intent=input() textt=createKeyword() if textt is None: print("\n Result: Intent Saved on Wit.ai Success (200){}") ```
<a href="https://colab.research.google.com/github/ChihabEddine98/DL_course/blob/main/go_cnn.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` import numpy as np import tensorflow as tf import keras from tensorflow.keras.utils import to_categorical,plot_model from keras import layers,regularizers # Params PLANES = 21 MOVES = 361 N = 100000 DIM = 19 ## Generate Dataaa # Inputs input_data = np.random.randint(2,size=(N,DIM,DIM,PLANES)).astype('float32') # Outputs policy_data = np.random.randint(MOVES,size=(N,1)) policy_data = to_categorical(policy_data) value_data = np.random.randint(2,size=(N,1)).astype('float32') # Model structure def GO_CNN(): # Input inp = keras.Input(shape=(DIM,DIM,PLANES),name='board') # Conv block x = layers.Conv2D(64,(3,3),padding='same',activation='relu')(inp) x = layers.BatchNormalization()(x) x = tf.nn.relu(x) x = layers.Conv2D(128,(5,5),padding='same',activation='relu')(x) x = layers.BatchNormalization()(x) x = tf.nn.relu(x) # Outputs policy_head = layers.Conv2D(1, 1, kernel_regularizer=regularizers.l2(0.0001))(x) policy_head = tf.nn.relu(policy_head) policy_head = layers.BatchNormalization()(policy_head) policy_head = layers.Dropout(0.4)(policy_head) policy_head = layers.Flatten()(policy_head) policy_head = layers.Dense(MOVES,'softmax', name='policy')(policy_head) value_head = layers.GlobalAveragePooling2D()(x) value_head = layers.Dense(64, kernel_regularizer=regularizers.l2(0.0001))(value_head) value_head = tf.nn.relu(value_head) value_head = layers.BatchNormalization()(value_head) value_head = layers.Dropout(0.4)(value_head) value_head = layers.Dense(1, activation='sigmoid', name='value', kernel_regularizer=regularizers.l2(0.0001))(value_head) model = keras.Model(inputs=inp, outputs=[policy_head, value_head]) model.compile(optimizer='adam', loss={'policy': 'categorical_crossentropy','value': 'mse'}, loss_weights=[1., 0.2] ) return model def train(model): with tf.device('/device:GPU:0'): history = model.fit(input_data,{'policy': policy_data, 'value': value_data}, batch_size = 256, epochs = 20, verbose = 1, validation_split = 0.1) return history.history model = GO_CNN() plot_model(model, 'multi_input_and_output_model_GO.png', show_shapes=True) history = train(model) import matplotlib.pyplot as plt def plots(epochs , history): fig,ax = plt.subplots(1,2) fig.set_size_inches((12,5)) ax[0].plot(epochs, history['value_loss'] , 'g' , label = 'Value Train acc') ax[0].plot(epochs, history['val_value_loss'],'m',label = 'Value Val acc') ax[0].set_title('Accuracy History ') ax[0].legend() ax[1].plot(epochs, history['policy_loss'] , 'r',label = 'Policy Train loss') ax[1].plot(epochs, history['val_policy_loss'],'c',label = 'Policy Val loss') ax[1].set_title('Loss History ') ax[1].legend() fig.show() epochs = range(1,21) plots(epochs,history) ```
# Linear regression In the following notebook we run a linear regression for each combination of the variables found in the provided data. The data can be found [here](https://docs.google.com/spreadsheets/u/1/d/12h1Pk1ZO-BDcGldzKW-IA9VMkU9RlUOPopFoOK6stdU/pubhtml). ### Imports ``` #!/usr/bin/env python # -*- coding: utf-8 -*- import sys reload(sys) sys.setdefaultencoding('utf8') import pandas as pd import numpy as np import matplotlib.pyplot as plt %matplotlib inline import seaborn as sns import statsmodels.formula.api as smf ``` ### Read file ``` municipios = pd.read_csv("/Users/Meili/Dropbox/Uniandes/Noveno/Visual/BonoParcial/Plebiscito-Colombia-2016/docs/Plebiscito.csv") municipios.head() # Create variables variables=np.array(municipios.keys()) delete=np.array(['Municipio','Departamento','GanadorPrimeraVuelta','Ganador','AfectadoPorElConflictoPares','ZonasDeConcentracion','CultivosIlicitos','VotosPorElNo','PorcentajeNo','VotosPorElSi','PorcentajeSi','VotosValidos','VotosTotales','CuantosSalieronAVotar','Abstencion']) variables=np.setdiff1d(variables,delete) comparacion=np.array(['PorcentajeNo','PorcentajeSi','Abstencion']) # To numeric for i in range(7, len(variables)): pd.to_numeric(municipios[variables[i]]) for i in range(len(comparacion)): pd.to_numeric(municipios[comparacion[i]]) ``` ### Scatterplot ``` fig, axs = plt.subplots(1, 2, sharey=True) municipios.plot(kind='scatter', x='PorcentajeSi', y='PorcentajeOscarIvanZuluagaPrimeraVuelta', ax=axs[0], figsize=(16, 8)) municipios.plot(kind='scatter', x='PorcentajeNo', y='PorcentajeOscarIvanZuluagaPrimeraVuelta', ax=axs[1]) ``` ### Regression ``` # create a fitted model in one line lm = smf.ols(formula="PorcentajeSi ~ PorcentajeOscarIvanZuluagaPrimeraVuelta", data=municipios).fit() # print the coefficients lm.params # create a DataFrame with the minimum and maximum values of the predictor X_new = pd.DataFrame({'PorcentajeOscarIvanZuluagaPrimeraVuelta': [municipios.PorcentajeOscarIvanZuluagaPrimeraVuelta.min(), municipios.PorcentajeOscarIvanZuluagaPrimeraVuelta.max()]}) X_new.head() # make predictions for those x values and store them preds = lm.predict(X_new) preds # first, plot the observed data municipios.plot(kind='scatter', x='PorcentajeSi', y='PorcentajeOscarIvanZuluagaPrimeraVuelta') # then, plot the least squares line plt.plot(X_new, preds, c='red', linewidth=2) ``` ### Correlation ``` correlaciones = municipios.corr() correlaciones results = {} for i in range(len(variables)): for j in range(len(comparacion)): variable = str(variables[i]) comparador = str(comparacion[j]) if (comparador is not None) and (variable is not None): # create a fitted model in one line lm = smf.ols(formula=comparador + " ~ " + variable, data=municipios).fit() results[comparador + " - " + variable] = [lm.params[1], lm.params[0],correlaciones[comparador][variable]] results ``` ### Write results ``` import csv with open('/Users/Meili/Dropbox/Uniandes/Noveno/Visual/BonoParcial/Plebiscito-Colombia-2016/docs/results.csv', 'wb') as csvfile: spamwriter = csv.writer(csvfile, delimiter=',',quotechar=' ', quoting=csv.QUOTE_MINIMAL) spamwriter.writerow(['Variable1', 'Variable2', 'pendiente', 'intercepto', 'pearson']) for item in results: line = [] line.append((item.split(' - ')[0]).strip()) line.append((item.split(' - ')[1]).strip()) line.extend(results[item]) spamwriter.writerow([','.join(map(str, line))]) ```
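As a quick cross-check of the fitted coefficients (a sketch that is not part of the original analysis), the slope and intercept of the single regression in the Regression section can be recovered with `numpy.polyfit`:

```python
import numpy as np

# drop rows with missing values so polyfit does not choke on NaNs
pair = municipios[['PorcentajeOscarIvanZuluagaPrimeraVuelta', 'PorcentajeSi']].dropna()
slope, intercept = np.polyfit(pair['PorcentajeOscarIvanZuluagaPrimeraVuelta'],
                              pair['PorcentajeSi'], 1)
print(slope, intercept)   # should agree with lm.params[1] and lm.params[0] from the Regression section
```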
# Testing attacks against RobustBench models In this tutorial, we will show how to correctly import [RobustBench]( https://github.com/RobustBench/robustbench) models inside SecML, and how to craft adversarial evasion attacks against them using SecML. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)]( https://colab.research.google.com/github/pralab/secml/blob/HEAD/tutorials/14-RobustBench.ipynb) <div class="alert alert-warning"> **Warning** Requires installation of the `pytorch` extra dependency. See [extra components](../index.rst#extra-components) for more information. </div> ``` %%capture --no-stderr --no-display # NBVAL_IGNORE_OUTPUT try: import secml import torch except ImportError: %pip install git+https://gitlab.com/secml/secml#egg=secml[pytorch] ``` We start by installing the models offered by RobustBench, a repository of pre-trained adversarially robust models, written in PyTorch. All the models are trained on CIFAR-10. To install the library, just open a terminal and execute the following command: ```bash pip install git+https://github.com/RobustBench/[email protected]``` ``` %%capture --no-stderr --no-display # NBVAL_IGNORE_OUTPUT try: import robustbench except ImportError: %pip install git+https://github.com/RobustBench/[email protected] ``` After the installation, we can import the model we like among the one offered by the library ([click here]( https://github.com/RobustBench/robustbench/tree/master/model_info) for the complete list): ``` # NBVAL_IGNORE_OUTPUT from robustbench.utils import load_model from secml.utils import fm from secml import settings output_dir = fm.join(settings.SECML_MODELS_DIR, 'robustbench') model = load_model(model_name='Carmon2019Unlabeled', norm='Linf', model_dir=output_dir) ``` This command will create a `models` directory inside the `secml-data` folder in your home directory, where it will download the desired model, specified by the `model_name` parameter. Since it is a PyTorch model, we can just load one sample from CIFAR-10 to test it. ``` # NBVAL_IGNORE_OUTPUT from secml.data.loader.c_dataloader_cifar import CDataLoaderCIFAR10 train_ds, test_ds = CDataLoaderCIFAR10().load() import torch from secml.ml.features.normalization import CNormalizerMinMax dataset_labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] normalizer = CNormalizerMinMax().fit(train_ds.X) pt = test_ds[0, :] x0, y0 = pt.X, pt.Y x0 = normalizer.transform(x0) input_shape = (3, 32, 32) x0_t = x0.tondarray().reshape(1, 3, 32, 32) y_pred = model(torch.Tensor(x0_t)) print("Predicted classes: {0}".format(dataset_labels[y_pred.argmax(axis=1).item()])) print("Real classes: {0}".format(dataset_labels[y0.item()])) ``` ## Load RobustBench models inside SecML We can now import the pre-trained robust model inside SecML. Since these models are all coded in PyTorch, we just need to use the PyTorch wrapper of SecML. In order to do this, we need to express the `input_shape` of the data, and feed the classifier with the flatten version of the array (under the hood, the framework will reconstruct the original shape): ``` from secml.ml.classifiers import CClassifierPyTorch secml_model = CClassifierPyTorch(model, input_shape=(3,32,32), pretrained=True) y_pred = secml_model.predict(x0) print("Predicted class: {0}".format(dataset_labels[y_pred.item()])) ``` ## Computing evasion attacks Now that we have imported the model inside SecML, we can compute attacks against it. 
We will use the iterative Projected Gradient Descent (PGD) attack, with `l2` perturbation. ``` from secml.adv.attacks.evasion import CAttackEvasionPGD noise_type = 'l2' # Type of perturbation 'l1' or 'l2' dmax = 0.5 # Maximum perturbation lb, ub = 0, 1 # Bounds of the attack space. Can be set to `None` for unbounded y_target = None # None if `error-generic` or a class label for `error-specific` # Should be chosen depending on the optimization problem solver_params = { 'eta': 0.4, 'max_iter': 100, 'eps': 1e-3 } pgd_ls_attack = CAttackEvasionPGD( classifier=secml_model, double_init_ds=test_ds[0, :], distance=noise_type, dmax=dmax, lb=lb, ub=ub, solver_params=solver_params, y_target=y_target ) y_pred_pgd, _, adv_ds_pgd, _ = pgd_ls_attack.run(x0, y0) print("Real class: {0}".format(dataset_labels[y0.item()])) print("Predicted class after the attack: {0}".format(dataset_labels[y_pred_pgd.item()])) from secml.figure import CFigure %matplotlib inline img_normal = x0.tondarray().reshape((3,32,32)).transpose(2,1,0) img_adv = adv_ds_pgd.X[0,:].tondarray().reshape((3,32,32)).transpose(2,1,0) diff_img = img_normal - img_adv diff_img -= diff_img.min() diff_img /= diff_img.max() fig = CFigure() fig.subplot(1,3,1) fig.sp.imshow(img_normal) fig.sp.title('{0}'.format(dataset_labels[y0.item()])) fig.sp.xticks([]) fig.sp.yticks([]) fig.subplot(1,3,2) fig.sp.imshow(img_adv) fig.sp.title('{0}'.format(dataset_labels[y_pred_pgd.item()])) fig.sp.xticks([]) fig.sp.yticks([]) fig.subplot(1,3,3) fig.sp.imshow(diff_img) fig.sp.title('Amplified perturbation') fig.sp.xticks([]) fig.sp.yticks([]) fig.tight_layout() fig.show() ```
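After the attack has run, it is worth confirming that the adversarial point actually respects the declared budget. The short check below is a sketch, not part of the tutorial; it reuses `adv_ds_pgd`, `x0`, and `dmax` from the cells above and converts to NumPy with the same `tondarray()` call already used for plotting.

```python
import numpy as np

# distance between the original sample and its adversarial counterpart
delta = adv_ds_pgd.X[0, :].tondarray() - x0.tondarray()
print("l2 norm of the perturbation: {:.4f} (budget dmax = {})".format(np.linalg.norm(delta), dmax))
```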
<a href="https://colab.research.google.com/github/Jam516/MEP-object-detection/blob/master/Thesis_Model_gym.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ## Import Tensorflow ``` !pip3 install numpy==1.17.4 %tensorflow_version 1.x import tensorflow as tf device_name = tf.test.gpu_device_name() if device_name != '/device:GPU:0': raise SystemError('GPU device not found') print('Found GPU at: {}'.format(device_name)) ``` ## Mount drive ``` from google.colab import drive drive.mount('/content/gdrive') %cd '/content/gdrive/My Drive/Thesis/Home' ``` ## Install API ``` !git clone https://github.com/tensorflow/models.git !git clone https://github.com/cocodataset/cocoapi.git !cd cocoapi/PythonAPI; make; cp -r pycocotools /content/gdrive/My\ Drive/Thesis/Home/models/research/ !apt-get install protobuf-compiler python-pil python-lxml python-tk !pip install Cython %cd /content/gdrive/My Drive/Thesis/Home/models/research/ !protoc object_detection/protos/*.proto --python_out=. import os os.environ['PYTHONPATH'] += ':/content/gdrive/My Drive/Thesis/Home/models/research/:/content/gdrive/My Drive/Thesis/Home/models/research/slim' !apt-get install protobuf-compiler python-pil python-lxml python-tk !pip install Cython !python setup.py build !python setup.py install ``` ## **Setup** Run at start of every session ``` import os os.environ['PYTHONPATH'] += ':/content/gdrive/My Drive/Thesis/Home/models/research/:/content/gdrive/My Drive/Thesis/Home/models/research/slim' %cd /content/gdrive/My Drive/Thesis/Home/models/research/ import time, psutil Start = time.time()- psutil.boot_time() Left= 12*3600 - Start print('Time remaining for this session is: ', Left/3600) ``` Test setup ``` !python object_detection/builders/model_builder_test.py ``` ## **Download and Configure Model** ``` %cd /content/gdrive/My Drive/Thesis/Home/models/research/object_detection # unpack model after manually uploading it # !tar -xvf model.tar !unzip model.zip ``` Copy in config file ## **Training** ``` %cd /content/gdrive/My Drive/Thesis/Home/models/research/object_detection !python model_main.py --logtostderr --model_dir=training/ --pipeline_config_path=training/faster_rcnn_nas_coco.config # !python train.py --logtostderr --train_dir=training/ --pipeline_config_path=training/faster_rcnn_nas_coco.config ``` ## Export Model ``` %cd /content/gdrive/My Drive/Thesis/Home/models import os os.environ['PYTHONPATH'] += ':/content/gdrive/My Drive/Thesis/Home/models/research/:/content/gdrive/My Drive/Thesis/Home/models/research/slim' %cd /content/gdrive/My Drive/Thesis/Home/models/research/object_detection !python export_inference_graph.py \ --input_type image_tensor \ --pipeline_config_path training/faster_rcnn_nas_coco.config \ --trained_checkpoint_prefix training/model.ckpt-7515 \ #change to appropriate checkpoint --output_directory Exp_graph/ !zip -r MY_exp_g.zip Exp_graph ``` ## **Test Model** ``` MODEL_NAME = 'My_exp_graph' PATH_TO_FROZEN_GRAPH = MODEL_NAME + '/frozen_inference_graph.pb' PATH_TO_LABELS = 'training/object-detection.pbtxt' NUM_CLASSES = 3 import numpy as np import os import six.moves.urllib as urllib import sys import tarfile import tensorflow as tf import zipfile from distutils.version import StrictVersion from collections import defaultdict from io import StringIO from matplotlib import pyplot as plt from PIL import Image sys.path.append("..") from object_detection.utils import ops as utils_ops %matplotlib inline from utils import label_map_util from utils 
import visualization_utils as vis_util detection_graph = tf.Graph() with detection_graph.as_default(): od_graph_def = tf.GraphDef() with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid: serialized_graph = fid.read() od_graph_def.ParseFromString(serialized_graph) tf.import_graph_def(od_graph_def, name='') category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True) def load_image_into_numpy_array(image): (im_width, im_height) = image.size return np.array(image.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8) # Detection ------------------------------------------------------- PATH_TO_TEST_IMAGES_DIR = 'test_images/' TEST_IMAGE_PATHS = [ os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.png'.format(i)) for i in range(3, 6) ] # Size, in inches, of the output images. IMAGE_SIZE = (24, 20) def run_inference_for_single_image(image, graph): with graph.as_default(): with tf.Session() as sess: ops = tf.get_default_graph().get_operations() all_tensor_names = {output.name for op in ops for output in op.outputs} tensor_dict = {} for key in ['num_detections', 'detection_boxes', 'detection_scores','detection_classes', 'detection_masks']: tensor_name = key + ':0' if tensor_name in all_tensor_names:tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name) if 'detection_masks' in tensor_dict: detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0]) detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0]) real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32) detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1]) detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1]) detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(detection_masks, detection_boxes, image.shape[1], image.shape[2]) detection_masks_reframed = tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8) tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0) image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0') # Run inference output_dict = sess.run(tensor_dict, feed_dict={image_tensor: image}) # all outputs are float32 numpy arrays, so convert types as appropriate output_dict['num_detections'] = int(output_dict['num_detections'][0]) output_dict['detection_classes'] = output_dict['detection_classes'][0].astype(np.uint8) output_dict['detection_boxes'] = output_dict['detection_boxes'][0] output_dict['detection_scores'] = output_dict['detection_scores'][0] if 'detection_masks' in output_dict: output_dict['detection_masks'] = output_dict['detection_masks'][0] return output_dict x = 1 for image_path in TEST_IMAGE_PATHS: image = Image.open(image_path) # the array based representation of the image will be used later in order to prepare the # result image with boxes and labels on it. image_np = load_image_into_numpy_array(image) # Expand dimensions since the model expects images to have shape: [1, None, None, 3] image_np_expanded = np.expand_dims(image_np, axis=0) # Actual detection. output_dict = run_inference_for_single_image(image_np_expanded, detection_graph) # Visualization of the results of a detection. 
vis_util.visualize_boxes_and_labels_on_image_array( image_np, output_dict['detection_boxes'], output_dict['detection_classes'], output_dict['detection_scores'], category_index, instance_masks=output_dict.get('detection_masks'), use_normalized_coordinates=True, line_thickness=3) plt.figure(figsize=IMAGE_SIZE) plt.imshow(image_np) plt.savefig('pic'+str(x)+'.png') x = x + 1 ``` Author: John Kufuor Adapted From: (Kinsley, 2017) (Tensorflow, 2020) (Solomon, 2019)
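As a possible extension of the test loop above, the per-image `output_dict` can also be summarized numerically rather than only visualized. The helper below is a sketch that is not part of the original notebook; it only assumes the `output_dict` and `category_index` structures produced by the code above, and the name `count_detections` and the 0.5 default threshold are illustrative choices.

```
def count_detections(output_dict, category_index, min_score=0.5):
    """Count detections per class name whose score is at least min_score (sketch)."""
    counts = {}
    for cls, score in zip(output_dict['detection_classes'],
                          output_dict['detection_scores']):
        if score >= min_score:
            name = category_index.get(int(cls), {}).get('name', str(cls))
            counts[name] = counts.get(name, 0) + 1
    return counts

# For example, inside the test loop above:
# print(image_path, count_detections(output_dict, category_index))
```

One small fix worth making in the export cell further up: the inline `#change to appropriate checkpoint` comment sits after a line-continuation backslash, which breaks the shell command, so `--output_directory` never reaches `export_inference_graph.py`; moving that comment onto its own line (or removing it) avoids the problem.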
<img align="left" src="https://lever-client-logos.s3.amazonaws.com/864372b1-534c-480e-acd5-9711f850815c-1524247202159.png" width=200> <br></br> <br></br> ## *Data Science Unit 4 Sprint 3 Assignment 1* # Recurrent Neural Networks and Long Short Term Memory (LSTM) ![Monkey at a typewriter](https://upload.wikimedia.org/wikipedia/commons/thumb/3/3c/Chimpanzee_seated_at_typewriter.jpg/603px-Chimpanzee_seated_at_typewriter.jpg) It is said that [infinite monkeys typing for an infinite amount of time](https://en.wikipedia.org/wiki/Infinite_monkey_theorem) will eventually type, among other things, the complete works of Wiliam Shakespeare. Let's see if we can get there a bit faster, with the power of Recurrent Neural Networks and LSTM. This text file contains the complete works of Shakespeare: https://www.gutenberg.org/files/100/100-0.txt Use it as training data for an RNN - you can keep it simple and train character level, and that is suggested as an initial approach. Then, use that trained RNN to generate Shakespearean-ish text. Your goal - a function that can take, as an argument, the size of text (e.g. number of characters or lines) to generate, and returns generated text of that size. Note - Shakespeare wrote an awful lot. It's OK, especially initially, to sample/use smaller data and parameters, so you can have a tighter feedback loop when you're trying to get things running. Then, once you've got a proof of concept - start pushing it more! ``` import requests import pandas as pd url = "https://www.gutenberg.org/files/100/100-0.txt" r = requests.get(url) r.encoding = r.apparent_encoding data = r.text data = data.split('\r\n') toc = [l.strip() for l in data[44:130:2]] # Skip the Table of Contents data = data[135:] # Fixing Titles toc[9] = 'THE LIFE OF KING HENRY V' toc[18] = 'MACBETH' toc[24] = 'OTHELLO, THE MOOR OF VENICE' toc[34] = 'TWELFTH NIGHT: OR, WHAT YOU WILL' locations = {id_:{'title':title, 'start':-99} for id_,title in enumerate(toc)} # Start for e,i in enumerate(data): for t,title in enumerate(toc): if title in i: locations[t].update({'start':e}) df_toc = pd.DataFrame.from_dict(locations, orient='index') df_toc['end'] = df_toc['start'].shift(-1).apply(lambda x: x-1) df_toc.loc[42, 'end'] = len(data) df_toc['end'] = df_toc['end'].astype('int') df_toc['text'] = df_toc.apply(lambda x: '\r\n'.join(data[ x['start'] : int(x['end']) ]), axis=1) #Shakespeare Data Parsed by Play df_toc ``` # **Text Cleaning** ``` import re import numpy as np import nltk from nltk.corpus import stopwords from nltk.tokenize import word_tokenize from tensorflow.keras.callbacks import LambdaCallback from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, LSTM from tensorflow.keras.optimizers import RMSprop from tensorflow.keras.preprocessing import sequence from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Embedding, Dropout, SimpleRNN, LSTM import numpy as np import random import sys import os class TextCleaner: def __init__(self, stop_word_type): self.stop_word_type = stop_word_type def download_stop_words(self): nltk.download(self.stop_word_type) def remove_punctuation(self, pattern, text): sub = re.sub(pattern, "", text) return sub def remove_special_characters(self, pattern, text): sub = re.sub(pattern, "", text) return sub def remove_digits(self, pattern, text): sub = re.sub(pattern, "", text) return sub def remove_white_space(self, text): sub = re.sub(r"\s{2}", "", text) return sub def tokenize(self, text): wrd_splitter = 
text.split() return wrd_splitter def remove_stop_words( self, tokens, language='english'): stop_words = set(stopwords.words(language)) filtered_tokens = [word for word in tokens if not word in stop_words] return filtered_tokens tc = TextCleaner('stopwords') tc.download_stop_words() data = df_toc['text'][1:5] data = data.apply(lambda x: x.lower()) data = data.apply( lambda x: tc.remove_punctuation(r"[\[\]\-?\":&;'$\\*_/!@`%\{\}\|\(\)]", x)) data = data.apply( lambda x: tc.remove_digits(r"[\d]", x)) data = data.apply( lambda x: tc.remove_special_characters(r"(\n)|(\r)", x)) data = data.apply( lambda x: tc.remove_white_space(x)) data = data.apply(lambda x: x.encode('ascii', 'ignore')) data = data.apply(lambda x: x.decode('UTF-8')) data = data.apply( lambda x: tc.tokenize(x)) # data = data.apply( # lambda x: tc.remove_stop_words(x)) for e, i in enumerate(data): if i == []: data.drop(index=e, inplace=True) joined_text = [] for text in data: for word in text: joined_text.append(word) joined_text = " ".join(joined_text) # Unique Characters chars = list(set(joined_text)) # Lookup Tables char_int = {c:i for i, c in enumerate(chars)} int_char = {i:c for i, c in enumerate(chars)} int_char # Create the sequence data maxlen = 50 step = 7 encoded = [char_int[c] for c in joined_text] sequences = [] # Each element is 40 chars long next_char = [] # One element for each sequence for i in range(0, len(encoded) - maxlen, step): sequences.append(encoded[i : i + maxlen]) next_char.append(encoded[i + maxlen]) print('sequences: ', len(sequences)) # print(encoded) # Create x & y x = np.zeros((len(sequences), maxlen, len(chars)), dtype=np.bool) y = np.zeros((len(sequences),len(chars)), dtype=np.bool) for i, sequence in enumerate(sequences): for t, char in enumerate(sequence): x[i,t,char] = 1 y[i, next_char[i]] = 1 x.shape # build the model: a single LSTM model = Sequential() model.add(LSTM(128, input_shape=(maxlen, len(chars)))) model.add(Dense(len(chars), activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam') def sample(preds): # helper function to sample an index from a probability array preds = np.asarray(preds).astype('float64') preds = np.log(preds) / 1 exp_preds = np.exp(preds) preds = exp_preds / np.sum(exp_preds) probas = np.random.multinomial(1, preds, 1) return np.argmax(probas) def on_epoch_end(epoch, _): # Function invoked at end of each epoch. Prints generated text. print() print('----- Generating text after Epoch: %d' % epoch) start_index = random.randint(0, len(joined_text) - maxlen - 1) generated = '' sentence = joined_text[start_index: start_index + maxlen] generated += sentence print('----- Generating with seed: "' + sentence + '"') sys.stdout.write(generated) for i in range(400): x_pred = np.zeros((1, maxlen, len(chars))) for t, char in enumerate(sentence): x_pred[0, t, char_int[char]] = 1 preds = model.predict(x_pred, verbose=0)[0] next_index = sample(preds) next_char = int_char[next_index] sentence = sentence[1:] + next_char sys.stdout.write(next_char) sys.stdout.flush() print() print_callback = LambdaCallback(on_epoch_end=on_epoch_end) # fit the model model.fit(x, y, batch_size=32, epochs=10, callbacks=[print_callback]) model.predict("yonder window") ``` # Resources and Stretch Goals ## Stretch goals: - Refine the training and generation of text to be able to ask for different genres/styles of Shakespearean text (e.g. 
plays versus sonnets) - Train a classification model that takes text and returns which work of Shakespeare it is most likely to be from - Make it more performant! Many possible routes here - lean on Keras, optimize the code, and/or use more resources (AWS, etc.) - Revisit the news example from class, and improve it - use categories or tags to refine the model/generation, or train a news classifier - Run on bigger, better data ## Resources: - [The Unreasonable Effectiveness of Recurrent Neural Networks](https://karpathy.github.io/2015/05/21/rnn-effectiveness/) - a seminal writeup demonstrating a simple but effective character-level NLP RNN - [Simple NumPy implementation of RNN](https://github.com/JY-Yoon/RNN-Implementation-using-NumPy/blob/master/RNN%20Implementation%20using%20NumPy.ipynb) - Python 3 version of the code from "Unreasonable Effectiveness" - [TensorFlow RNN Tutorial](https://github.com/tensorflow/models/tree/master/tutorials/rnn) - code for training a RNN on the Penn Tree Bank language dataset - [4 part tutorial on RNN](http://www.wildml.com/2015/09/recurrent-neural-networks-tutorial-part-1-introduction-to-rnns/) - relates RNN to the vanishing gradient problem, and provides example implementation - [RNN training tips and tricks](https://github.com/karpathy/char-rnn#tips-and-tricks) - some rules of thumb for parameterizing and training your RNN
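The assignment above asks for a function that takes a desired amount of text and returns that much generated text. Note that the final `model.predict("yonder window")` call in the notebook will not work as written, because the model expects a one-hot encoded array of shape `(1, maxlen, len(chars))` rather than a raw string. Below is a sketch of such a function, reusing the `model`, `sample`, `char_int`, `int_char`, `maxlen`, `chars` and `joined_text` objects defined above; the seed-padding detail is an assumption, not something specified by the assignment.

```
def generate_text(n_chars, seed=None):
    """Generate n_chars characters from the trained character-level model (sketch)."""
    if seed is None:
        start = random.randint(0, len(joined_text) - maxlen - 1)
        seed = joined_text[start:start + maxlen]
    sentence = seed[-maxlen:].rjust(maxlen)      # trim/pad the seed to exactly maxlen characters
    generated = ''
    for _ in range(n_chars):
        x_pred = np.zeros((1, maxlen, len(chars)))
        for t, char in enumerate(sentence):
            if char in char_int:                 # skip characters unseen during training
                x_pred[0, t, char_int[char]] = 1
        preds = model.predict(x_pred, verbose=0)[0]
        next_char = int_char[sample(preds)]
        generated += next_char
        sentence = sentence[1:] + next_char
    return generated

print(generate_text(200, seed='yonder window'))
```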
# Training Neural Networks The network we built in the previous part isn't so smart, it doesn't know anything about our handwritten digits. Neural networks with non-linear activations work like universal function approximators. There is some function that maps your input to the output. For example, images of handwritten digits to class probabilities. The power of neural networks is that we can train them to approximate this function, and basically any function given enough data and compute time. <img src="assets/function_approx.png" width=500px> At first the network is naive, it doesn't know the function mapping the inputs to the outputs. We train the network by showing it examples of real data, then adjusting the network parameters such that it approximates this function. To find these parameters, we need to know how poorly the network is predicting the real outputs. For this we calculate a **loss function** (also called the cost), a measure of our prediction error. For example, the mean squared loss is often used in regression and binary classification problems $$ \large \ell = \frac{1}{2n}\sum_i^n{\left(y_i - \hat{y}_i\right)^2} $$ where $n$ is the number of training examples, $y_i$ are the true labels, and $\hat{y}_i$ are the predicted labels. By minimizing this loss with respect to the network parameters, we can find configurations where the loss is at a minimum and the network is able to predict the correct labels with high accuracy. We find this minimum using a process called **gradient descent**. The gradient is the slope of the loss function and points in the direction of fastest change. To get to the minimum in the least amount of time, we then want to follow the gradient (downwards). You can think of this like descending a mountain by following the steepest slope to the base. <img src='assets/gradient_descent.png' width=350px> ## Backpropagation For single layer networks, gradient descent is straightforward to implement. However, it's more complicated for deeper, multilayer neural networks like the one we've built. Complicated enough that it took about 30 years before researchers figured out how to train multilayer networks. Training multilayer networks is done through **backpropagation** which is really just an application of the chain rule from calculus. It's easiest to understand if we convert a two layer network into a graph representation. <img src='assets/backprop_diagram.png' width=550px> In the forward pass through the network, our data and operations go from bottom to top here. We pass the input $x$ through a linear transformation $L_1$ with weights $W_1$ and biases $b_1$. The output then goes through the sigmoid operation $S$ and another linear transformation $L_2$. Finally we calculate the loss $\ell$. We use the loss as a measure of how bad the network's predictions are. The goal then is to adjust the weights and biases to minimize the loss. To train the weights with gradient descent, we propagate the gradient of the loss backwards through the network. Each operation has some gradient between the inputs and outputs. As we send the gradients backwards, we multiply the incoming gradient with the gradient for the operation. Mathematically, this is really just calculating the gradient of the loss with respect to the weights using the chain rule. 
$$ \large \frac{\partial \ell}{\partial W_1} = \frac{\partial L_1}{\partial W_1} \frac{\partial S}{\partial L_1} \frac{\partial L_2}{\partial S} \frac{\partial \ell}{\partial L_2} $$ **Note:** I'm glossing over a few details here that require some knowledge of vector calculus, but they aren't necessary to understand what's going on. We update our weights using this gradient with some learning rate $\alpha$. $$ \large W^\prime_1 = W_1 - \alpha \frac{\partial \ell}{\partial W_1} $$ The learning rate $\alpha$ is set such that the weight update steps are small enough that the iterative method settles in a minimum. ## Losses in PyTorch Let's start by seeing how we calculate the loss with PyTorch. Through the `nn` module, PyTorch provides losses such as the cross-entropy loss (`nn.CrossEntropyLoss`). You'll usually see the loss assigned to `criterion`. As noted in the last part, with a classification problem such as MNIST, we're using the softmax function to predict class probabilities. With a softmax output, you want to use cross-entropy as the loss. To actually calculate the loss, you first define the criterion then pass in the output of your network and the correct labels. Something really important to note here. Looking at [the documentation for `nn.CrossEntropyLoss`](https://pytorch.org/docs/stable/nn.html#torch.nn.CrossEntropyLoss), > This criterion combines `nn.LogSoftmax()` and `nn.NLLLoss()` in one single class. > > The input is expected to contain scores for each class. This means we need to pass in the raw output of our network into the loss, not the output of the softmax function. This raw output is usually called the *logits* or *scores*. We use the logits because softmax gives you probabilities which will often be very close to zero or one but floating-point numbers can't accurately represent values near zero or one ([read more here](https://docs.python.org/3/tutorial/floatingpoint.html)). It's usually best to avoid doing calculations with probabilities, typically we use log-probabilities. ``` import torch from torch import nn import torch.nn.functional as F from torchvision import datasets, transforms # Define a transform to normalize the data transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,)), ]) # Download and load the training data trainset = datasets.MNIST('~/.pytorch/MNIST_data/', download=True, train=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) ``` ### Note If you haven't seen `nn.Sequential` yet, please finish the end of the Part 2 notebook. ``` # Build a feed-forward network model = nn.Sequential(nn.Linear(784, 128), nn.ReLU(), nn.Linear(128, 64), nn.ReLU(), nn.Linear(64, 10)) # Define the loss criterion = nn.CrossEntropyLoss() # Get our data images, labels = next(iter(trainloader)) # Flatten images images = images.view(images.shape[0], -1) # Forward pass, get our logits logits = model(images) # Calculate the loss with the logits and the labels loss = criterion(logits, labels) print(loss) ``` In my experience it's more convenient to build the model with a log-softmax output using `nn.LogSoftmax` or `F.log_softmax` ([documentation](https://pytorch.org/docs/stable/nn.html#torch.nn.LogSoftmax)). Then you can get the actual probabilities by taking the exponential `torch.exp(output)`. With a log-softmax output, you want to use the negative log likelihood loss, `nn.NLLLoss` ([documentation](https://pytorch.org/docs/stable/nn.html#torch.nn.NLLLoss)). 
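To see this relationship concretely, here is a small check (a sketch, not part of the original notebook) showing that `nn.CrossEntropyLoss` on raw logits gives the same value as `nn.NLLLoss` applied to `F.log_softmax` of those logits:

```
import torch
from torch import nn
import torch.nn.functional as F

logits = torch.randn(8, 10)             # raw scores for a batch of 8 examples, 10 classes
labels = torch.randint(0, 10, (8,))     # integer class labels

loss_ce = nn.CrossEntropyLoss()(logits, labels)
loss_nll = nn.NLLLoss()(F.log_softmax(logits, dim=1), labels)
print(loss_ce.item(), loss_nll.item())  # the two values match (up to floating-point error)
```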
>**Exercise:** Build a model that returns the log-softmax as the output and calculate the loss using the negative log likelihood loss. Note that for `nn.LogSoftmax` and `F.log_softmax` you'll need to set the `dim` keyword argument appropriately. `dim=0` calculates softmax across the rows, so each column sums to 1, while `dim=1` calculates across the columns so each row sums to 1. Think about what you want the output to be and choose `dim` appropriately. ``` # TODO: Build a feed-forward network model = nn.Sequential(nn.Linear(784, 128), nn.ReLU(), nn.Linear(128, 64), nn.ReLU(), nn.Linear(64, 10), nn.LogSoftmax(dim=1)) # TODO: Define the loss criterion = nn.NLLLoss() ### Run this to check your work # Get our data images, labels = next(iter(trainloader)) # Flatten images images = images.view(images.shape[0], -1) # Forward pass, get our logits logits = model(images) # Calculate the loss with the logits and the labels loss = criterion(logits, labels) print(loss) ``` ## Autograd Now that we know how to calculate a loss, how do we use it to perform backpropagation? Torch provides a module, `autograd`, for automatically calculating the gradients of tensors. We can use it to calculate the gradients of all our parameters with respect to the loss. Autograd works by keeping track of operations performed on tensors, then going backwards through those operations, calculating gradients along the way. To make sure PyTorch keeps track of operations on a tensor and calculates the gradients, you need to set `requires_grad = True` on a tensor. You can do this at creation with the `requires_grad` keyword, or at any time with `x.requires_grad_(True)`. You can turn off gradients for a block of code with the `torch.no_grad()` content: ```python x = torch.zeros(1, requires_grad=True) >>> with torch.no_grad(): ... y = x * 2 >>> y.requires_grad False ``` Also, you can turn on or off gradients altogether with `torch.set_grad_enabled(True|False)`. The gradients are computed with respect to some variable `z` with `z.backward()`. This does a backward pass through the operations that created `z`. ``` x = torch.randn(2,2, requires_grad=True) print(x) y = x**2 print(y) ``` Below we can see the operation that created `y`, a power operation `PowBackward0`. ``` ## grad_fn shows the function that generated this variable print(y.grad_fn) ``` The autograd module keeps track of these operations and knows how to calculate the gradient for each one. In this way, it's able to calculate the gradients for a chain of operations, with respect to any one tensor. Let's reduce the tensor `y` to a scalar value, the mean. ``` z = y.mean() print(z) ``` You can check the gradients for `x` and `y` but they are empty currently. ``` print(x.grad) ``` To calculate the gradients, you need to run the `.backward` method on a Variable, `z` for example. This will calculate the gradient for `z` with respect to `x` $$ \frac{\partial z}{\partial x} = \frac{\partial}{\partial x}\left[\frac{1}{n}\sum_i^n x_i^2\right] = \frac{x}{2} $$ ``` z.backward() print(x.grad) print(x/2) ``` These gradients calculations are particularly useful for neural networks. For training we need the gradients of the cost with respect to the weights. With PyTorch, we run data forward through the network to calculate the loss, then, go backwards to calculate the gradients with respect to the loss. Once we have the gradients we can make a gradient descent step. 
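To make that last sentence concrete, here is a minimal sketch (not from the notebook) of one manual gradient-descent step driven by autograd, before optimizers are introduced in the next section:

```
import torch

w = torch.randn(3, requires_grad=True)     # a toy "parameter"
x = torch.tensor([1.0, 2.0, 3.0])
loss = ((w * x).sum() - 10.0) ** 2         # a toy scalar loss
loss.backward()                            # fills in w.grad

lr = 0.01
with torch.no_grad():                      # the update itself must not be tracked
    w -= lr * w.grad
w.grad.zero_()                             # clear the gradient before the next step
print(w)
```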
## Loss and Autograd together When we create a network with PyTorch, all of the parameters are initialized with `requires_grad = True`. This means that when we calculate the loss and call `loss.backward()`, the gradients for the parameters are calculated. These gradients are used to update the weights with gradient descent. Below you can see an example of calculating the gradients using a backwards pass. ``` # Build a feed-forward network model = nn.Sequential(nn.Linear(784, 128), nn.ReLU(), nn.Linear(128, 64), nn.ReLU(), nn.Linear(64, 10), nn.LogSoftmax(dim=1)) criterion = nn.NLLLoss() images, labels = next(iter(trainloader)) images = images.view(images.shape[0], -1) logits = model(images) loss = criterion(logits, labels) print('Before backward pass: \n', model[0].weight.grad) loss.backward() print('After backward pass: \n', model[0].weight.grad) ``` ## Training the network! There's one last piece we need to start training, an optimizer that we'll use to update the weights with the gradients. We get these from PyTorch's [`optim` package](https://pytorch.org/docs/stable/optim.html). For example we can use stochastic gradient descent with `optim.SGD`. You can see how to define an optimizer below. ``` from torch import optim # Optimizers require the parameters to optimize and a learning rate optimizer = optim.SGD(model.parameters(), lr=0.01) ``` Now we know how to use all the individual parts so it's time to see how they work together. Let's consider just one learning step before looping through all the data. The general process with PyTorch: * Make a forward pass through the network * Use the network output to calculate the loss * Perform a backward pass through the network with `loss.backward()` to calculate the gradients * Take a step with the optimizer to update the weights Below I'll go through one training step and print out the weights and gradients so you can see how it changes. Note that I have a line of code `optimizer.zero_grad()`. When you do multiple backwards passes with the same parameters, the gradients are accumulated. This means that you need to zero the gradients on each training pass or you'll retain gradients from previous training batches. ``` print('Initial weights - ', model[0].weight) images, labels = next(iter(trainloader)) images.resize_(64, 784) # Clear the gradients, do this because gradients are accumulated optimizer.zero_grad() # Forward pass, then backward pass, then update weights output = model(images) loss = criterion(output, labels) loss.backward() print('Gradient -', model[0].weight.grad) # Take an update step and few the new weights optimizer.step() print('Updated weights - ', model[0].weight) ``` ### Training for real Now we'll put this algorithm into a loop so we can go through all the images. Some nomenclature, one pass through the entire dataset is called an *epoch*. So here we're going to loop through `trainloader` to get our training batches. For each batch, we'll doing a training pass where we calculate the loss, do a backwards pass, and update the weights. >**Exercise:** Implement the training pass for our network. If you implemented it correctly, you should see the training loss drop with each epoch. 
``` ## Your solution here model = nn.Sequential(nn.Linear(784, 128), nn.ReLU(), nn.Linear(128, 64), nn.ReLU(), nn.Linear(64, 10), nn.LogSoftmax(dim=1)) criterion = nn.NLLLoss() optimizer = optim.SGD(model.parameters(), lr=0.003) epochs = 5 for e in range(epochs): running_loss = 0 for images, labels in trainloader: # Flatten MNIST images into a 784 long vector images = images.view(images.shape[0], -1) # Clear the gradients, do this because gradients are accumulated optimizer.zero_grad() # TODO: Training pass output = model(images) loss = criterion(output, labels) loss.backward() # Take an update step and few the new weights optimizer.step() running_loss += loss.item() else: print(f"Training loss: {running_loss/len(trainloader)}") ``` With the network trained, we can check out it's predictions. ``` %matplotlib inline import helper images, labels = next(iter(trainloader)) img = images[9].view(1, 784) # Turn off gradients to speed up this part with torch.no_grad(): logps = model(img) # Output of the network are log-probabilities, need to take exponential for probabilities ps = torch.exp(logps) helper.view_classify(img.view(1, 28, 28), ps) ``` Now our network is brilliant. It can accurately predict the digits in our images. Next up you'll write the code for training a neural network on a more complex dataset.
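Before moving on, the claim that the network now predicts accurately can be quantified with a quick check. The snippet below is a sketch that uses only plain PyTorch (unlike `helper.view_classify`, which is a course utility) and computes the accuracy on a single training batch:

```
images, labels = next(iter(trainloader))
images = images.view(images.shape[0], -1)

with torch.no_grad():
    logps = model(images)

pred = torch.exp(logps).argmax(dim=1)                 # most probable class per image
accuracy = (pred == labels).float().mean().item()
print(f"Accuracy on this batch: {accuracy:.3f}")
```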
``` !which python cd /Users/itamar/git/astro/urf/prf !pwd !ls import PRF.distance as distance import PRF import numpy from imblearn.datasets import make_imbalance from sklearn import datasets from collections import Counter import itertools import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix numpy.set_printoptions(precision=2) class_names = ['L1','L2','L3','S1','S2','S3'] # Split the data into a training set and a test set # X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) # Run classifier, using a model that is too regularized (C too low) to see # the impact on the results #RF = RandomForestClassifier(n_estimators=n_trees,n_jobs=-1) #RF.fit(X_train, y_train) #y_pred = RF.predict(X_test) def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. """ if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, numpy.newaxis] #print("Normalized confusion matrix") # else: #print('Confusion matrix, without normalization') #print(cm) plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title, fontsize = 15) plt.colorbar() tick_marks = numpy.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45, fontsize = 15) plt.yticks(tick_marks, classes, fontsize = 15) fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2. for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.ylabel('True label', fontsize = 15) plt.xlabel('Predicted label', fontsize = 15) plt.tight_layout() n_samples= 100000 n_classes= 2 X, y = datasets.make_classification(n_samples = n_samples, n_features=10, n_classes=3, n_informative=10, n_redundant=0) X2, y2 = datasets.make_classification(n_samples = n_samples, n_features=10, n_classes=3, n_informative=10, n_redundant=0) X2[:,:5] = X2[:,:5] + 10 y2 = y2 + 3 X = numpy.vstack([X,X2]) y = numpy.hstack([y,y2]) print('Distribution before imbalancing: {}'.format(Counter(y))) X_res, y_res = make_imbalance(X, y, sampling_strategy={0: 20000, 1: 1000, 2: 250, 3:500, 4:100, 5:50}) print('Distribution after imbalancing: {}'.format(Counter(y_res))) n_trees = 100 n_samples_ = X_res.shape[0] n_features = 'auto' n_train = 20000 n_test = 20000 train_inds = numpy.random.choice(numpy.arange(n_samples_),n_train) test_inds = numpy.random.choice(numpy.arange(n_samples_),n_test) X_train = X_res[train_inds] X_test = X_res[test_inds] y_train = y_res[train_inds] y_test = y_res[test_inds] print('Distribution after imbalancing: {}'.format(Counter(y_train))) ``` # Supervised RF ``` n_trees = 100 prf_cls = PRF.prf(n_estimators=n_trees, bootstrap=True) prf_cls.fit(X=X_train, y=y_train) print(prf_cls.score(X_test, y=y_test)) pred = prf_cls.predict(X_test) cnf_matrix = confusion_matrix(y_test, pred) # Plot non-normalized confusion matrix plt.figure(figsize = (10,7)) plot_confusion_matrix(cnf_matrix, classes=class_names, title='Supervised RF, Confusion matrix, w/o normalization') # Plot normalized confusion matrix plt.figure(figsize = (10,7)) plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True, title='Supervised RF, Normalized confusion matrix') plt.show() ``` # original Unsupervised RF ``` %%time n_trees = 100 prf_cls = PRF.prf(n_estimators=n_trees, bootstrap=True, new_syn_data_frac=0) 
prf_cls.fit(X=X_test) pred0 = distance.predict_urf(prf_cls, X_test, y_test) cnf_matrix = confusion_matrix(y_test, pred0) # Plot non-normalized confusion matrix plt.figure(figsize = (10,7)) plot_confusion_matrix(cnf_matrix, classes=class_names, title='URF with original synthetic data') # Plot normalized confusion matrix plt.figure(figsize = (10,7)) plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True, title='URF with original synthetic data') plt.show() ``` # Unsupervised RF with synthetic data inside the tree nodes * new synthetic data is created with probability 0.5 in each node ``` %%time n_trees = 100 prf_cls = PRF.prf(n_estimators=n_trees, bootstrap=True, new_syn_data_frac=0.5) prf_cls.fit(X=X_test) pred05 = distance.predict_urf(prf_cls, X_test, y_test) cnf_matrix = confusion_matrix(y_test, pred05) # Plot non-normalized confusion matrix plt.figure(figsize = (10,7)) plot_confusion_matrix(cnf_matrix, classes=class_names, title='URF with new synthetic data inside leafs, f=0.5') # Plot normalized confusion matrix plt.figure(figsize = (10,7)) plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True, title='URF with new synthetic data inside leafs, f=0.5') plt.show() ``` # Unsupervised RF with synthetic data inside the tree nodes * other fractions of tree nodes with new synthetic data ``` n_trees = 100 prf_cls = PRF.prf(n_estimators=n_trees, bootstrap=True, new_syn_data_frac=0.1) prf_cls.fit(X=X_test) pred01 = distance.predict_urf(prf_cls, X_test, y_test) cnf_matrix = confusion_matrix(y_test, pred01) # Plot non-normalized confusion matrix plt.figure(figsize = (10,7)) plot_confusion_matrix(cnf_matrix, classes=class_names, title='URF with new synthetic data inside leafs, f=0.1') # Plot normalized confusion matrix plt.figure(figsize = (10,7)) plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True, title='URF with new synthetic data inside leafs, f=0.1') plt.show() n_trees = 100 prf_cls = PRF.prf(n_estimators=n_trees, bootstrap=True, new_syn_data_frac=0.25) prf_cls.fit(X=X_test) pred025 = distance.predict_urf(prf_cls, X_test, y_test) cnf_matrix = confusion_matrix(y_test, pred025) # Plot non-normalized confusion matrix plt.figure(figsize = (10,7)) plot_confusion_matrix(cnf_matrix, classes=class_names, title='URF with new synthetic data inside leafs, f=0.25') # Plot normalized confusion matrix plt.figure(figsize = (10,7)) plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True, title='URF with new synthetic data inside leafs, f=0.25') plt.show() n_trees = 100 prf_cls = PRF.prf(n_estimators=n_trees, bootstrap=True, new_syn_data_frac=0.75) prf_cls.fit(X=X_test) pred075 = distance.predict_urf(prf_cls, X_test, y_test) cnf_matrix = confusion_matrix(y_test, pred075) # Plot non-normalized confusion matrix plt.figure(figsize = (10,7)) plot_confusion_matrix(cnf_matrix, classes=class_names, title='URF with new synthetic data inside leafs, f=0.75') # Plot normalized confusion matrix plt.figure(figsize = (10,7)) plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True, title='URF with new synthetic data inside leafs, f=0.75') plt.show() n_trees = 100 prf_cls = PRF.prf(n_estimators=n_trees, bootstrap=True, new_syn_data_frac=0.9) prf_cls.fit(X=X_test) pred09 = distance.predict_urf(prf_cls, X_test, y_test) cnf_matrix = confusion_matrix(y_test, pred09) # Plot non-normalized confusion matrix plt.figure(figsize = (10,7)) plot_confusion_matrix(cnf_matrix, classes=class_names, title='URF with new synthetic data inside leafs, f=0.9') # Plot 
normalized confusion matrix plt.figure(figsize = (10,7)) plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True, title='URF with new synthetic data inside leafs, f=0.9') plt.show() ```
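One way to summarize the fraction sweep above in a single table is to score each set of URF assignments against the true labels with the adjusted Rand index, which is invariant to how the cluster labels are numbered. This is a sketch and assumes the `pred*` arrays returned by `distance.predict_urf` are per-sample cluster (or class) assignments, as the confusion matrices above suggest:

```
from sklearn.metrics import adjusted_rand_score

sweeps = {0.0: pred0, 0.1: pred01, 0.25: pred025,
          0.5: pred05, 0.75: pred075, 0.9: pred09}
for frac, pred in sweeps.items():
    print(f"new_syn_data_frac={frac:<4}  ARI={adjusted_rand_score(y_test, pred):.3f}")
```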
# Lists

- A list can store a collection of data of any size; you can think of it as a container.

## A quick example to warm up

![](../Photo/115.png)

## Creating a list

- a = [1,2,3,4,5]

```
a = []  # a list can hold values of any type
type(a)
c = 'abcd'
list(c)  # explicit conversion to a list
''.join(['a', 'b', 'c'])
```

## Common list operations

![](../Photo/116.png)

```
a = [1, 2]
10 * a

a = [1, 2, 3, 4, 5, [100, 200]]
a
a[5][0]

b = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for i in range(0, 10, 2):
    b[i] = 100
b

b = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
for i in range(0, 10, 3):
    print(b[i:i+2])

# count every element, including those inside a nested list
a = [1, 2, 3, [3, 4]]
count = 0
for i in a:
    if type(i) == list:
        for j in i:
            count += 1
    else:
        count += 1
print(count)

# traverse a list with a while loop
a = [1, 2, 3, [3, 4]]
i = 0
while i < len(a):
    print(a[i])
    i += 1

# swap two elements
b = [4, 3, 2, 1]
b[1], b[0] = b[0], b[1]
b

# bubble sort
b = [4, 3, 2, 1]
length = len(b)
for i in range(length):
    for j in range(length - 1 - i):
        if b[j] > b[j+1]:
            b[j], b[j+1] = b[j+1], b[j]
    print("result of round %d:" % (i+1), b)
print("after sorting >", b)
```

# List indexing

- Mylist[index]
- forward (positive) and reverse (negative) indexing
- always watch out for out-of-range indices
- ![](../Photo/117.png)

## List slicing

- Mylist[start:end]
- forward and reverse slicing

## Lists with +, *, in, not in

## Iterating over elements with a for loop

- a for loop can iterate over anything iterable

## EP:

- iterate over a list with a while loop

## Comparing lists

- \>, <, >=, <=, ==, !=

## List comprehensions

[x for x in range(10)]

## List methods

![](../Photo/118.png)

```
c = [1, 2, 3, 4, 5, 6]
for i in range(0, len(c) + 3, 3):
    c.insert(i, 100)
c

a = [1, 2, 3]
a.pop(1)
a

a = 'a b c d'
a.split(' ', 2)

import random
a = [1, 2, 3, 4]
random.shuffle(a)
a
```

## Splitting a string into a list

- split breaks a string on a separator you specify

## EP:

![](../Photo/119.png)
![](../Photo/120.png)

## Copying lists

- copy: shallow copy
- deepcopy (from the copy module): deep copy
- http://www.pythontutor.com/visualize.html#mode=edit

```
import copy
```

## Sorting lists

- sort
- sorted
- multi-level sorting of lists
- anonymous (lambda) functions

```
c = [1, 3, 4]
c.sort(reverse=True)
c

(lambda x: print(x**2))(100)
```

## EP:

- Sort the list [5,3,8,0,17] by hand, ascending or descending (a worked sketch follows the exercise list below)

- 1
![](../Photo/121.png)

```
chengji = [40, 55, 70, 58]
best = max(chengji)
for i in chengji:
    if i >= best - 10:
        dengji = 'A'
    elif i >= best - 20:
        dengji = 'B'
    elif i >= best - 30:
        dengji = 'C'
    elif i >= best - 40:
        dengji = 'D'
    print('score is', i, 'grade is', dengji)
```

- 2
![](../Photo/122.png)

```
a = [1, 2, 3]
a.reverse()
a
```

- 3
![](../Photo/123.png)

```
import random
numbers = []
i = 0
while i < 10:
    n = random.randint(1, 101)
    numbers.append(n)
    print(n, end='\t')
    i += 1
# numbers.count(value) reports how many times a given value was drawn
```

- 4
![](../Photo/124.png)

```
chengji = [58, 91, 37, 82, 46, 100]
avg = sum(chengji) / len(chengji)
x1 = 0  # scores at or above the average
x2 = 0  # scores below the average
for i in chengji:
    if i >= avg:
        x1 += 1
    else:
        x2 += 1
print(x1, 'score(s) are at or above the average and', x2, 'are below it')
```

- 5
![](../Photo/125.png)

```
import random
numbers = [random.randint(0, 10) for _ in range(1000)]
for j in sorted(set(numbers)):
    print(j, ':', numbers.count(j))
```

- 6
![](../Photo/126.png)
- 7
![](../Photo/127.png)
![](../Photo/128.png)
- 8
![](../Photo/129.png)
- 9
![](../Photo/130.png)
- 10
![](../Photo/131.png)
- 11
![](../Photo/132.png)
- 12
![](../Photo/133.png)
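For the sorting exercise above (sort [5,3,8,0,17] by hand, ascending or descending), here is a worked sketch using the same bubble-sort idea as earlier in the notebook:

```
data = [5, 3, 8, 0, 17]

# Manual bubble sort, ascending: repeatedly swap adjacent out-of-order pairs.
for i in range(len(data) - 1):
    for j in range(len(data) - 1 - i):
        if data[j] > data[j + 1]:
            data[j], data[j + 1] = data[j + 1], data[j]

print(data)                         # [0, 3, 5, 8, 17]
print(sorted(data, reverse=True))   # [17, 8, 5, 3, 0], descending via the built-in
```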
# Linear algebra in Python with NumPy

In this lab, you will have the opportunity to remember some basic concepts about linear algebra and how to use them in Python.

Numpy is one of the most used libraries in Python for array manipulation. It adds to Python a set of functions that allows us to operate on large multidimensional arrays with just a few lines. So forget about writing nested loops for adding matrices! With NumPy, this is as simple as adding numbers.

Let us import the `numpy` library and assign the alias `np` for it. We will follow this convention in almost every notebook in this course, and you'll see this in many resources outside this course as well.

```
import numpy as np  # The swiss knife of the data scientist.
```

## Defining lists and numpy arrays

```
alist = [1, 2, 3, 4, 5]          # Define a python list. It looks like an np array
narray = np.array([1, 2, 3, 4])  # Define a numpy array
```

Note the difference between a Python list and a NumPy array.

```
print(alist)
print(narray)

print(type(alist))
print(type(narray))
```

## Algebraic operators on NumPy arrays vs. Python lists

One of the common beginner mistakes is to mix up the concepts of NumPy arrays and Python lists. Just observe the next example, where we add two objects of the two mentioned types. Note that the '+' operator on NumPy arrays performs an element-wise addition, while the same operation on Python lists results in a list concatenation. Be careful while coding. Knowing this can save many headaches.

```
print(narray + narray)
print(alist + alist)
```

It is the same as with the product operator, `*`. In the first case, we scale the vector, while in the second case, we concatenate the same list three times.

```
print(narray * 3)
print(alist * 3)
```

Be aware of the difference because, within the same function, both types of arrays can appear. Numpy arrays are designed for numerical and matrix operations, while lists are for more general purposes.

## Matrix or Array of Arrays

In linear algebra, a matrix is a structure composed of n rows by m columns. That means each row must have the same number of columns. With NumPy, we have two ways to create a matrix:
* Creating an array of arrays using `np.array` (recommended).
* Creating a matrix using `np.matrix` (still available but might be removed soon).

NumPy arrays or lists can be used to initialize a matrix, but the resulting matrix will be composed of NumPy arrays only.

```
npmatrix1 = np.array([narray, narray, narray])        # Matrix initialized with NumPy arrays
npmatrix2 = np.array([alist, alist, alist])           # Matrix initialized with lists
npmatrix3 = np.array([narray, [1, 1, 1, 1], narray])  # Matrix initialized with both types

print(npmatrix1)
print(npmatrix2)
print(npmatrix3)
```

However, when defining a matrix, be sure that all the rows contain the same number of elements. Otherwise, the linear algebra operations could lead to unexpected results.

Analyze the following two examples:

```
# Example 1:
okmatrix = np.array([[1, 2], [3, 4]])  # Define a 2x2 matrix
print(okmatrix)      # Print okmatrix
print(okmatrix * 2)  # Print a scaled version of okmatrix

# Example 2:
badmatrix = np.array([[1, 2], [3, 4], [5, 6, 7]])  # Define a matrix. Note the third row contains 3 elements
print(badmatrix)      # Print the malformed matrix
print(badmatrix * 2)  # It is supposed to scale the whole matrix
```

## Scaling and translating matrices

Now that you know how to build correct NumPy arrays and matrices, let us see how easy it is to operate with them in Python using the regular algebraic operators like + and -.

Operations can be performed between arrays and arrays or between arrays and scalars.

```
# Scale the matrix by 2 and translate it by 1 unit
result = okmatrix * 2 + 1  # For each element in the matrix, multiply by 2 and add 1
print(result)

# Add two sum compatible matrices
result1 = okmatrix + okmatrix
print(result1)

# Subtract two sum compatible matrices. This is called the difference vector
result2 = okmatrix - okmatrix
print(result2)
```

The product operator `*` when used on arrays or matrices indicates element-wise multiplication. Do not confuse it with the dot product.

```
result = okmatrix * okmatrix  # Multiply each element by itself
print(result)
```

## Transpose a matrix

In linear algebra, the transpose of a matrix is an operator that flips a matrix over its diagonal, i.e., the transpose operator switches the row and column indices of the matrix, producing another matrix. If the original matrix dimension is n by m, the resulting transposed matrix will be m by n.

**T** denotes the transpose operation with NumPy matrices.

```
matrix3x2 = np.array([[1, 2], [3, 4], [5, 6]])  # Define a 3x2 matrix
print('Original matrix 3 x 2')
print(matrix3x2)
print('Transposed matrix 2 x 3')
print(matrix3x2.T)
```

However, note that the transpose operation does not affect 1D arrays.

```
nparray = np.array([1, 2, 3, 4])  # Define an array
print('Original array')
print(nparray)
print('Transposed array')
print(nparray.T)
```

Perhaps in this case you wanted to do:

```
nparray = np.array([[1, 2, 3, 4]])  # Define a 1 x 4 matrix. Note the 2 level of square brackets
print('Original array')
print(nparray)
print('Transposed array')
print(nparray.T)
```

## Get the norm of a nparray or matrix

In linear algebra, the norm of an n-dimensional vector $\vec a$ is defined as:

$$ norm(\vec a) = ||\vec a|| = \sqrt {\sum_{i=1}^{n} a_i ^ 2}$$

Calculating the norm of a vector or even of a matrix is a general operation when dealing with data. Numpy has a set of functions for linear algebra in the subpackage **linalg**, including the **norm** function. Let us see how to get the norm of a given array or matrix:

```
nparray1 = np.array([1, 2, 3, 4])  # Define an array
norm1 = np.linalg.norm(nparray1)

nparray2 = np.array([[1, 2], [3, 4]])  # Define a 2 x 2 matrix. Note the 2 level of square brackets
norm2 = np.linalg.norm(nparray2)

print(norm1)
print(norm2)
```

Note that without any other parameter, the norm function treats the matrix as being just an array of numbers. However, it is possible to get the norm by rows or by columns. The **axis** parameter controls the form of the operation:
* **axis=0** means get the norm of each column
* **axis=1** means get the norm of each row.

```
nparray2 = np.array([[1, 1], [2, 2], [3, 3]])  # Define a 3 x 2 matrix.

normByCols = np.linalg.norm(nparray2, axis=0)  # Get the norm for each column. Returns 2 elements
normByRows = np.linalg.norm(nparray2, axis=1)  # get the norm for each row. Returns 3 elements

print(normByCols)
print(normByRows)
```

However, there are more ways to get the norm of a matrix in Python. For that, let us see all the different ways of defining the dot product between 2 arrays.
## The dot product between arrays: All the flavors

The dot product or scalar product or inner product between two vectors $\vec a$ and $\vec b$ of the same size is defined as:

$$\vec a \cdot \vec b = \sum_{i=1}^{n} a_i b_i$$

The dot product takes two vectors and returns a single number.

```
nparray1 = np.array([0, 1, 2, 3])  # Define an array
nparray2 = np.array([4, 5, 6, 7])  # Define an array

flavor1 = np.dot(nparray1, nparray2)  # Recommended way
print(flavor1)

flavor2 = np.sum(nparray1 * nparray2)  # Ok way
print(flavor2)

flavor3 = nparray1 @ nparray2  # Geeks way
print(flavor3)

# As you never should do:
# Noobs way
flavor4 = 0
for a, b in zip(nparray1, nparray2):
    flavor4 += a * b
print(flavor4)
```

**We strongly recommend using np.dot, since it is the only method that accepts arrays and lists without problems**

```
norm1 = np.dot(np.array([1, 2]), np.array([3, 4]))  # Dot product on nparrays
norm2 = np.dot([1, 2], [3, 4])                      # Dot product on python lists
print(norm1, '=', norm2)
```

Finally, note that the norm is the square root of the dot product of the vector with itself. That gives many options to write that function (a short check appears at the end of this lab):

$$ norm(\vec a) = ||\vec a|| = \sqrt {\sum_{i=1}^{n} a_i ^ 2} = \sqrt {\vec a \cdot \vec a}$$

## Sums by rows or columns

Another general operation performed on matrices is the sum by rows or columns. Just as we did for the function norm, the **axis** parameter controls the form of the operation:
* **axis=0** means to sum the elements of each column together.
* **axis=1** means to sum the elements of each row together.

```
nparray2 = np.array([[1, -1], [2, -2], [3, -3]])  # Define a 3 x 2 matrix.

sumByCols = np.sum(nparray2, axis=0)  # Get the sum for each column. Returns 2 elements
sumByRows = np.sum(nparray2, axis=1)  # get the sum for each row. Returns 3 elements

print('Sum by columns: ')
print(sumByCols)
print('Sum by rows:')
print(sumByRows)
```

## Get the mean by rows or columns

As with the sums, one can get the **mean** by rows or columns using the **axis** parameter. Just remember that the mean is the sum of the elements divided by the length of the vector:

$$ mean(\vec a) = \frac{\sum_{i=1}^{n} a_i}{n}$$

```
nparray2 = np.array([[1, -1], [2, -2], [3, -3]])  # Define a 3 x 2 matrix. Chosen to be a matrix with 0 mean

mean = np.mean(nparray2)                # Get the mean for the whole matrix
meanByCols = np.mean(nparray2, axis=0)  # Get the mean for each column. Returns 2 elements
meanByRows = np.mean(nparray2, axis=1)  # get the mean for each row. Returns 3 elements

print('Matrix mean: ')
print(mean)
print('Mean by columns: ')
print(meanByCols)
print('Mean by rows:')
print(meanByRows)
```

## Center the columns of a matrix

Centering the attributes of a data matrix is another essential preprocessing step. Centering a matrix means to subtract the column mean from each element inside the column. The sum by columns of a centered matrix is always 0.

With NumPy, this process is as simple as this:

```
nparray2 = np.array([[1, 1], [2, 2], [3, 3]])  # Define a 3 x 2 matrix.

nparrayCentered = nparray2 - np.mean(nparray2, axis=0)  # Remove the mean for each column

print('Original matrix')
print(nparray2)
print('Centered by columns matrix')
print(nparrayCentered)

print('New mean by column')
print(nparrayCentered.mean(axis=0))
```

**Warning:** This process does not apply to row centering. In such cases, consider transposing the matrix, centering by columns, and then transposing back the result. See the example below:

```
nparray2 = np.array([[1, 3], [2, 4], [3, 5]])  # Define a 3 x 2 matrix.
nparrayCentered = nparray2.T - np.mean(nparray2, axis=1)  # Remove the mean for each row
nparrayCentered = nparrayCentered.T  # Transpose back the result

print('Original matrix')
print(nparray2)
print('Centered by rows matrix')
print(nparrayCentered)

print('New mean by rows')
print(nparrayCentered.mean(axis=1))
```

Note that some operations can be performed using static functions like `np.sum()` or `np.mean()`, or by using the array's own methods.

```
nparray2 = np.array([[1, 3], [2, 4], [3, 5]])  # Define a 3 x 2 matrix.

mean1 = np.mean(nparray2)  # Static way
mean2 = nparray2.mean()    # Dynamic way

print(mean1, ' == ', mean2)
```

Even though they are equivalent, we recommend always using the static form.

**Congratulations! You have successfully reviewed vector and matrix operations with Numpy!**
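As a quick check of the identity mentioned earlier (the norm is the square root of the dot product of a vector with itself), the short sketch below compares both routes; the array is an arbitrary illustrative example, not data from this lab.

```
import numpy as np

a = np.array([1, 2, 3, 4])
print(np.sqrt(np.dot(a, a)))  # norm written as sqrt(a . a)
print(np.linalg.norm(a))      # same value via np.linalg.norm
```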
## Create a system mixing SMIRNOFF and non-SMIRNOFF-formatted force fields

This example shows how to create a receptor-ligand `System` where the ligand (toluene) is parametrized with a SMIRNOFF force field and the protein (T4 Lysozyme) solvated in water is assigned AMBER and TIP3P-FB parameters through the ParmEd library.

We'll need two PDB files: one for the ligand in vacuum, and one for the solvated protein without the ligand. The coordinates of the protein-ligand complex will be determined by these PDB files, so their positions need to be consistent with the ligand being positioned in the binding pocket if this is the desired initial configuration of your simulation.

### Parametrize a molecule with smirnoff99Frosst

First, we parametrize the ligand (toluene) with the smirnoff99Frosst force field through the usual route to create an OpenMM `System`.

```
from simtk.openmm.app import PDBFile

from openforcefield.utils import get_data_file_path
from openforcefield.topology import Molecule, Topology
from openforcefield.typing.engines.smirnoff import ForceField

# Create an OpenFF Topology of toluene from a pdb file.
toluene_pdb_file_path = get_data_file_path('molecules/toluene.pdb')
toluene_pdbfile = PDBFile(toluene_pdb_file_path)
toluene = Molecule.from_smiles('Cc1ccccc1')
off_topology = Topology.from_openmm(openmm_topology=toluene_pdbfile.topology,
                                    unique_molecules=[toluene])

# Load the smirnoff99Frosst system from disk.
force_field = ForceField('smirnoff99Frosst.offxml')

# Parametrize the toluene molecule.
toluene_system = force_field.create_openmm_system(off_topology)
```

and we convert the OpenMM `System` to a ParmEd `Structure` that we'll be able to mix with the protein.

<div class="alert alert-block alert-warning">
<b>Warning:</b> ParmEd's Structure model is inspired by AMBER. Some information in an OpenMM System is not directly translatable into a Structure. In particular, the long-range interaction treatment method (e.g., PME, CutoffPeriodic) and its parameters (e.g., cutoff and cutoff switching distance, PME error tolerance) are known to be lost during the conversion.
</div>

```
import parmed

# Convert OpenMM System into a ParmEd Structure.
toluene_structure = parmed.openmm.load_topology(toluene_pdbfile.topology,
                                                toluene_system,
                                                xyz=toluene_pdbfile.positions)
```

### Create a ParmEd `Structure` of an AMBER-parametrized receptor in TIP3P-FB water

We have to create a ParmEd `Structure` of the receptor (T4 Lysozyme) to combine with the toluene `Structure`. Here we assign `amber99sbildn` to a T4 Lysozyme receptor solvated in TIP3P-FB water using OpenMM.

First, we load the parameters and the PDB file including positions for the protein, water, and ion atoms.

<div class="alert alert-block alert-info">
<b>Note:</b> If you already have AMBER (prmtop/inpcrd), GROMACS (top/gro), or any other file supported by ParmEd specifying the parameters for the solvated protein, you can simply load the files directly into a Structure using ParmEd's functionalities. See https://parmed.github.io/ParmEd/html/readwrite.html .
</div>

```
# Load the AMBER protein force field parameters through OpenMM.
from simtk.openmm import app
omm_forcefield = app.ForceField('amber99sbildn.xml', 'tip3pfb.xml')

# Load the solvated receptor from a PDB file.
t4_pdb_file_path = get_data_file_path('systems/test_systems/T4_lysozyme_water_ions.pdb')
t4_pdb_file = PDBFile(t4_pdb_file_path)

# Obtain the updated OpenMM Topology and positions.
omm_topology = t4_pdb_file.getTopology()
positions = t4_pdb_file.getPositions()
```

We then create a parametrized OpenMM `System` and convert it to a `Structure`.

Note the `rigidWater=False` argument in `ForceField.createSystem()`. This is necessary to work around a problem arising with ParmEd in reading the parameters of constrained bonds from an OpenMM `System` (see https://github.com/openforcefield/openforcefield/issues/259 for more details). We'll re-add the hydrogen bond constraints when we create the `System` for the complex.

<div class="alert alert-block alert-info">
<b>Note:</b> If you don't solvate the system, or if you load it into ParmEd directly from AMBER, GROMACS, or other files, you won't need extra precautions.
</div>

```
# Parameterize the protein.
t4_system = omm_forcefield.createSystem(omm_topology, rigidWater=False)

# Convert the protein System into a ParmEd Structure.
t4_structure = parmed.openmm.load_topology(omm_topology,
                                           t4_system,
                                           xyz=positions)
```

### Combine receptor and ligand structures

We can then merge the receptor and ligand `Structure` objects to form the complex. Note that the coordinates of protein and ligand in this example are determined by the PDB files, and they are already consistent with the ligand being positioned in the binding pocket.

<div class="alert alert-block alert-info">
<b>Note:</b> If you want to include water molecules in the binding site, you will have to be careful to place them so that they won't create steric clashes once the ligand is inserted.
</div>

```
complex_structure = toluene_structure + t4_structure
```

### Convert the structure back into an OpenMM System

Once we have the `Structure` of the complex, we can choose to create a `System` object that we can simulate with OpenMM.

```
from simtk.openmm.app import NoCutoff, HBonds
from simtk import unit

# Convert the Structure to an OpenMM System in vacuum.
complex_system = complex_structure.createSystem(nonbondedMethod=NoCutoff,
                                                nonbondedCutoff=9.0*unit.angstrom,
                                                constraints=HBonds,
                                                removeCMMotion=False)
```
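As an optional next step, not part of the original example, one could run a short energy minimization to sanity-check the combined parametrization. The sketch below assumes that `complex_structure` carries valid positions for every atom; the integrator settings are arbitrary illustrative choices.

```
from simtk.openmm import LangevinIntegrator
from simtk.openmm.app import Simulation
from simtk import unit

# Illustrative sanity check: briefly minimize the combined complex.
integrator = LangevinIntegrator(300*unit.kelvin, 1.0/unit.picosecond, 2.0*unit.femtosecond)
simulation = Simulation(complex_structure.topology, complex_system, integrator)
simulation.context.setPositions(complex_structure.positions)
simulation.minimizeEnergy(maxIterations=100)
```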
# Temporal RSA

This demo notebook demonstrates how to work with temporal data in the RSA toolbox. So far, it demonstrates how to (1) import a temporal dataset into the `pyrsa.data.TemporalDataset` class and (2) create RDM movies using the `pyrsa.rdm.calc_rdm_movie` function.

The notebook starts with the required imports.

```
import numpy as np
import matplotlib.pyplot as plt

import pyrsa
import pickle
from pyrsa.rdm import calc_rdm_movie
```

## Load temporal data

Here I use the sample data from mne-python: https://mne.tools/dev/overview/datasets_index.html#sample

The data consist of the preprocessed MEG data in "sample_audvis_raw.fif". Preprocessing includes:
- downsampling to 60 Hz
- band-pass filtering between 1 Hz and 20 Hz
- rejecting bad trials using an amplitude threshold
- baseline correction (baseline -200 to 0 ms)

*See demos/TemporalSampleData/preproc_mn_sample_data.py*

The preprocessed data is stored in *TemporalSampleData/meg_sample_data.pkl*

```
dat = pickle.load( open( "TemporalSampleData/meg_sample_data.pkl", "rb" ) )
measurements = dat['data']
cond_names = [x for x in dat['cond_names'].keys()]
cond_idx = dat['cond_idx']
channel_names = dat['channel_names']
times = dat['times']
print('there are %d observations (trials), %d channels, and %d time-points\n' % (measurements.shape))
print('conditions:')
print(cond_names)
```

Plot condition averages for two channels:

```
fig, ax = plt.subplots(1, 2, figsize=(12,4))
ax = ax.flatten()
for jj,chan in enumerate(channel_names[:2]):
    for ii, cond_ii in enumerate(np.unique(cond_idx)):
        mn = measurements[cond_ii == cond_idx,jj,:].mean(0).squeeze()
        ax[jj].plot(times, mn, label = cond_names[ii])
    ax[jj].set_title(chan)
    ax[jj].legend()
plt.show()
```

## The `pyrsa.data.TemporalDataset` class

`measurements` is an `np.array` of shape n_obs x n_channels x n_times.

`time_descriptor` should contain the time-point vector for the measurements, of length n_times. It is recommended to call this descriptor 'time'.

```
tim_des = {'time': times}
```

The other descriptors are the same as in the `pyrsa.data.Dataset` class.

```
des = {'session': 0, 'subj': 0}
obs_des = {'conds': cond_idx}
chn_des = {'channels': channel_names}

data = pyrsa.data.TemporalDataset(measurements,
                                  descriptors = des,
                                  obs_descriptors = obs_des,
                                  channel_descriptors = chn_des,
                                  time_descriptors = tim_des)
data.sort_by('conds')
```

### Convenience methods

`pyrsa.data.TemporalDataset` comes with the same convenience methods as `pyrsa.data.Dataset`.
In addition, the following functions are provided:

- `pyrsa.data.TemporalDataset.split_time(by)`
- `pyrsa.data.TemporalDataset.subset_time(by, t_from, t_to)`
- `pyrsa.data.TemporalDataset.bin_time(by, bins)`
- `pyrsa.data.TemporalDataset.convert_to_dataset(by)`

#### `pyrsa.data.TemporalDataset.split_time(by)`

splits the `pyrsa.data.TemporalDataset` object into a list of n_times `pyrsa.data.TemporalDataset` objects, splitting the measurements along the time_descriptor `by`.

```
print('shape of original measurements')
print(data.measurements.shape)

data_split_time = data.split_time('time')

print('\nafter splitting')
print(len(data_split_time))
print(data_split_time[0].measurements.shape)
```

#### `pyrsa.data.TemporalDataset.subset_time(by, t_from, t_to)`

returns a new `pyrsa.data.TemporalDataset` with only the data where `time_descriptors[by]` is between t_from and t_to.

```
print('shape of original measurements')
print(data.measurements.shape)

data_subset_time = data.subset_time('time', t_from = -.1, t_to = .5)

print('\nafter subsetting')
print(data_subset_time.measurements.shape)
print(data_subset_time.time_descriptors['time'][0])
```

#### `pyrsa.data.TemporalDataset.bin_time(by, bins)`

returns a new `pyrsa.data.TemporalDataset` object with binned temporal data. Data within bins is averaged.

`bins` is a list or array, where the first dimension contains the bins, and the second dimension the old time-bins that should go into each bin.

```
bins = np.reshape(tim_des['time'], [-1, 2])
print(len(bins))
print(bins[0])

print('shape of original measurements')
print(data.measurements.shape)

data_binned = data.bin_time('time', bins=bins)

print('\nafter binning')
print(data_binned.measurements.shape)
print(data_binned.time_descriptors['time'][0])
```

#### `pyrsa.data.TemporalDataset.convert_to_dataset(by)`

returns a `pyrsa.data.Dataset` object where the time dimension is absorbed into the observation dimension.

```
print('shape of original measurements')
print(data.measurements.shape)

data_dataset = data.convert_to_dataset('time')

print('\nafter conversion')
print(data_dataset.measurements.shape)
print(data_dataset.obs_descriptors['time'][0])
```

## Create an RDM movie

The function `calc_rdm_movie` takes a `pyrsa.data.TemporalDataset` as input and outputs a `pyrsa.rdm.RDMs` object. It works like `calc_rdm`.

```
rdms_data = calc_rdm_movie(data, method = 'euclidean',
                           descriptor = 'conds')
print(rdms_data)
```

Binning can be applied before computing the RDMs by simply specifying the bins argument.

```
rdms_data_binned = calc_rdm_movie(data, method = 'euclidean',
                                  descriptor = 'conds', bins=bins)
print(rdms_data_binned)
```

## From here on

The following are examples of data analysis and plotting with temporal data. So far they use the toolbox's functions for non-temporal data. This section should be expanded once new temporal RSA functions are added to the toolbox.

Here I use the toolbox's standard plotting function.
```
plt.subplots(1, figsize=(10,15))

# add formatted time as rdm_descriptor
rdms_data_binned.rdm_descriptors['time_formatted'] = ['%0.0f ms' % (np.round(x*1000,2)) for x in rdms_data_binned.rdm_descriptors['time']]

pyrsa.vis.show_rdm(rdms_data_binned,
                   do_rank_transform=False,
                   pattern_descriptor='conds',
                   rdm_descriptor='time_formatted')
```

## Model rdms

This is a simple example with basic model RDMs.

```
from pyrsa.rdm import get_categorical_rdm

rdms_model_in = get_categorical_rdm(['%d' % x for x in range(4)])
rdms_model_lr = get_categorical_rdm(['l','r','l','r'])
rdms_model_av = get_categorical_rdm(['a','a','v','v'])

model_names = ['independent', 'left/right', 'audio/visual']

# append in one RDMs object
model_rdms = rdms_model_in
model_rdms.append(rdms_model_lr)
model_rdms.append(rdms_model_av)
model_rdms.rdm_descriptors['model_names'] = model_names
model_rdms.pattern_descriptors['cond_names'] = cond_names

plt.figure(figsize=(10,10))
pyrsa.vis.show_rdm(model_rdms, rdm_descriptor='model_names', pattern_descriptor = 'cond_names')
```

## Data-model similarity across time

```
from pyrsa.rdm import compare

r = []
for mod in model_rdms:
    r.append(compare(mod, rdms_data_binned, method='cosine'))

for i, r_ in enumerate(r):
    plt.plot(rdms_data_binned.rdm_descriptors['time'], r_.squeeze(), label=model_names[i])

plt.xlabel('time')
plt.ylabel('model-data cosine similarity')
plt.legend()
```
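As a small extension, the convenience methods introduced earlier can be chained before computing an RDM movie. This is only an illustrative sketch, not part of the original demo; the 0-400 ms window and the pairwise bins are arbitrary choices.

```
# Restrict to a post-stimulus window, then bin pairs of samples before computing RDMs.
data_win = data.subset_time('time', t_from=0.0, t_to=0.4)
win_times = data_win.time_descriptors['time']
win_bins = np.reshape(win_times[:len(win_times) - len(win_times) % 2], [-1, 2])
rdms_win = calc_rdm_movie(data_win, method='euclidean', descriptor='conds', bins=win_bins)
print(rdms_win)
```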
# Preprocessing

```
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np

# random split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, shuffle=False)
X_val, X_test, y_val, y_test = train_test_split(X_test, y_test, test_size=0.5, shuffle=False)

# one-hot encoding
one_hot_features = ['relationship', 'race', 'occupation', 'marital-status', 'sex', 'workclass']
from sklearn.preprocessing import OneHotEncoder
enc = OneHotEncoder(handle_unknown='ignore')
enc.fit(X_train[one_hot_features])
X_train_enc_oh = pd.DataFrame(enc.transform(X_train[one_hot_features]).toarray(), columns=enc.get_feature_names())
X_test_enc_oh = pd.DataFrame(enc.transform(X_test[one_hot_features]).toarray(), columns=enc.get_feature_names())
X_train = pd.concat([X_train.drop(columns = one_hot_features), X_train_enc_oh], axis=1)
del X_train_enc_oh
X_test = pd.concat([X_test.drop(columns = one_hot_features), X_test_enc_oh], axis=1)
del X_test_enc_oh

# ordinal encoding
ordinal_features = ['education']
education_order = [' Preschool', ' 1st-4th', ' 5th-6th', ' 7th-8th', ' 9th', ' 10th', ' 11th', ' 12th',
                   ' HS-grad', ' Prof-school', ' Assoc-acdm', ' Assoc-voc', ' Some-college',
                   ' Bachelors', ' Masters', ' Doctorate']
from sklearn.preprocessing import OrdinalEncoder
encoder = OrdinalEncoder(categories=[education_order])
encoder.fit(X_train[ordinal_features].values.reshape(-1, 1))
X_train_enc_ord = pd.DataFrame(encoder.transform(X_train[ordinal_features]), columns=ordinal_features)
X_test_enc_ord = pd.DataFrame(encoder.transform(X_test[ordinal_features]), columns=ordinal_features)

# Normalize a feature (template: assumes a DataFrame `df` and a column name `feature`)
mue = df[feature].mean()
sigma = df[feature].std()
df[feature] = (df[feature] - mue)/sigma

# Dimensionality Reduction with PCA
from sklearn.decomposition import PCA

def plot_PCA_curve(X_train):
    pca = PCA()
    pca.fit(X_train)

    plt.figure(figsize=(7,7))
    plt.plot(np.cumsum(pca.explained_variance_ratio_))
    plt.xlabel('number of components')
    plt.ylabel('cumulative explained variance')

    plt.plot(range(1, len(pca.explained_variance_ratio_) + 1), pca.explained_variance_ratio_)
    plt.xlabel('k (component)')
    plt.title('Percentage of variance explained by given component');

def get_n_pca_components(n, X_train, X_test):
    pca = PCA(n_components=n)
    pca.fit(X_train)
    X_train_t = pca.transform(X_train)
    X_test_t = pca.transform(X_test)
    return X_train_t, X_test_t

import scipy.stats
from tqdm import tqdm

def map_features(df, features_for_embedding, threshold=0.99):
    lookup_dict = {}
    data_mapped = df.copy(deep=True)
    for col in tqdm(features_for_embedding):
        # Replace rare values with the string 'OOV'
        normalized_vc = data_mapped[col].value_counts(normalize=True).cumsum()
        vals_to_remove = list(normalized_vc[normalized_vc > threshold].index)
        # print(f'Removing the following values from {col}: {vals_to_remove}')
        data_mapped.loc[data_mapped[col].isin(vals_to_remove), col] = 'OOV'

        # Create the mapping
        col_mapping = {k: v for k, v in enumerate(data_mapped[col].unique(), start=1) if v != 'OOV'}
        col_mapping[0] = 'OOV'

        # rerank the keys to make sure that we have no missing key:
        ranked_keys = scipy.stats.rankdata(list(col_mapping.keys())) - 1
        reranked_col_mapping = dict(zip(ranked_keys, col_mapping.values()))

        # add corresponding mapped columns
        inverse_col_mapping = {v: k for k, v in reranked_col_mapping.items()}
        data_mapped[f'{col}'] = data_mapped[col].map(inverse_col_mapping)
        lookup_dict[col] = inverse_col_mapping

        # display(lookup_dict)
        # display(data_mapped)
    return data_mapped, lookup_dict

def map_test(df_test: pd.DataFrame, lookup_dict: dict):
    data_mapped = pd.DataFrame()
    for key in tqdm(lookup_dict.keys()):
        data_mapped[key] = df_test[key].apply(lambda x: lookup_dict[key][x] if x in lookup_dict[key].keys() else 0)
    return data_mapped
```
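A hedged usage sketch of the two helpers above: fit the value-to-integer lookup on the training split, then reuse it on the test split so that rare or unseen categories fall back to the 0 ('OOV') index. The choice of embedding columns here is an illustrative assumption, not taken from this notebook.

```
# Illustrative call pattern for map_features / map_test defined above.
embed_cols = ['occupation', 'workclass']  # hypothetical columns to map for embeddings
X_train_mapped, lookup = map_features(X_train, embed_cols, threshold=0.99)
X_test_mapped = map_test(X_test, lookup)
print(X_train_mapped[embed_cols].head())
print(X_test_mapped.head())
```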
# Predicting house prices using k-nearest neighbors regression

In this notebook, we will implement k-nearest neighbors regression. You will:
* Find the k-nearest neighbors of a given query input
* Predict the output for the query input using the k-nearest neighbors
* Choose the best value of k using a validation set

## Importing Libraries

```
import os
import zipfile
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('darkgrid')
%matplotlib inline
```

## Unzipping files with house sales data

For this notebook, we use a subset of the King County housing dataset created by randomly selecting 40% of the houses in the full dataset.

```
# Put files in the current directory into a list
files_list = [f for f in os.listdir('.') if os.path.isfile(f)]

# Filenames of unzipped files
unzip_files = ['kc_house_data_small.csv','kc_house_data_small_train.csv',
               'kc_house_data_small_validation.csv', 'kc_house_data_small_test.csv' ]

# If unzipped file not in files_list, unzip the file
for filename in unzip_files:
    if filename not in files_list:
        zip_file = filename + '.zip'
        unzipping = zipfile.ZipFile(zip_file)
        unzipping.extractall()
        unzipping.close()
```

## Load house sales data

```
# Defining a dict with the data type for each feature
dtype_dict = {'bathrooms':float, 'waterfront':int, 'sqft_above':int, 'sqft_living15':float,
              'grade':int, 'yr_renovated':int, 'price':float, 'bedrooms':float, 'zipcode':str,
              'long':float, 'sqft_lot15':float, 'sqft_living':float, 'floors':float,
              'condition':int, 'lat':float, 'date':str, 'sqft_basement':int, 'yr_built':int,
              'id':str, 'sqft_lot':int, 'view':int}

sales = pd.read_csv('kc_house_data_small.csv', dtype=dtype_dict)
```

## Import useful functions from previous notebooks

To efficiently compute pairwise distances among data points, we will convert the DataFrame into a 2D Numpy array. First import the numpy library and then copy and paste `get_numpy_data()` from the second notebook of Week 2.

```
def get_numpy_data(input_df, features, output):

    input_df['constant'] = 1.0  # Adding column 'constant' to input DataFrame with all values = 1.0

    features = ['constant'] + features  # Adding 'constant' to the list of features

    feature_matrix = input_df.as_matrix(columns=features)  # Convert DataFrame w/ columns in features list into np.ndarray

    output_array = input_df[output].values  # Convert column with output feature into np.array

    return(feature_matrix, output_array)
```

We will also need the `normalize_features()` function from Week 5 that normalizes all feature columns to unit norm. Paste this function below.
``` def normalize_features(feature_matrix): norms = np.linalg.norm(feature_matrix, axis=0) normalized_features = feature_matrix/norms return (normalized_features, norms) ``` ## Split data into training, test, and validation sets ``` train_data = pd.read_csv('kc_house_data_small_train.csv', dtype=dtype_dict) test_data = pd.read_csv('kc_house_data_small_test.csv', dtype=dtype_dict) validation_data = pd.read_csv('kc_house_data_validation.csv', dtype=dtype_dict) ``` ## Extract features and normalize Using all of the numerical inputs listed in `feature_list`, transform the training, test, and validation DataFrames into Numpy arrays: ``` feature_list = ['bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'waterfront', 'view', 'condition', 'grade', 'sqft_above', 'sqft_basement', 'yr_built', 'yr_renovated', 'lat', 'long', 'sqft_living15', 'sqft_lot15'] features_train, output_train = get_numpy_data(train_data, feature_list, 'price') features_test, output_test = get_numpy_data(test_data, feature_list, 'price') features_valid, output_valid = get_numpy_data(validation_data, feature_list, 'price') ``` In computing distances, it is crucial to normalize features. Otherwise, for example, the `sqft_living` feature (typically on the order of thousands) would exert a much larger influence on distance than the `bedrooms` feature (typically on the order of ones). We divide each column of the training feature matrix by its 2-norm, so that the transformed column has unit norm. IMPORTANT: Make sure to store the norms of the features in the training set. The features in the test and validation sets must be divided by these same norms, so that the training, test, and validation sets are normalized consistently. ``` features_train, norms = normalize_features(features_train) # normalize training set features (columns) features_test = features_test / norms # normalize test set by training set norms features_valid = features_valid / norms # normalize validation set by training set norms ``` ## Compute a single distance To start, let's just explore computing the "distance" between two given houses. We will take our **query house** to be the first house of the test set and look at the distance between this house and the 10th house of the training set. To see the features associated with the query house, print the first row (index 0) of the test feature matrix. You should get an 18-dimensional vector whose components are between 0 and 1. ``` print features_test[0] print len(features_test[0]) ``` Now print the 10th row (index 9) of the training feature matrix. Again, you get an 18-dimensional vector with components between 0 and 1. ``` print features_train[9] print len(features_train[9]) ``` ***QUIZ QUESTION *** What is the Euclidean distance between the query house and the 10th house of the training set? Note: Do not use the `np.linalg.norm` function; use `np.sqrt`, `np.sum`, and the power operator (`**`) instead. The latter approach is more easily adapted to computing multiple distances at once. ``` dist_euclid = np.sqrt( np.sum( (features_train[9] - features_test[0] )**2 ) ) print dist_euclid ``` ## Compute multiple distances Of course, to do nearest neighbor regression, we need to compute the distance between our query house and *all* houses in the training set. 
To visualize this nearest-neighbor search, let's first compute the distance from our query house (`features_test[0]`) to the first 10 houses of the training set (`features_train[0:10]`) and then search for the nearest neighbor within this small set of houses. Through restricting ourselves to a small set of houses to begin with, we can visually scan the list of 10 distances to verify that our code for finding the nearest neighbor is working. Write a loop to compute the Euclidean distance from the query house to each of the first 10 houses in the training set. ``` # Setting the first house as the NN min_euclid_dist = np.sqrt( np.sum( ( features_train[0] - features_test[0] )**2 ) ) min_house_index = 0 for i in range(1,10,1): curr_euclid_dist = np.sqrt( np.sum( ( features_train[i] - features_test[0] )**2 ) ) # If distance of current house < current NN, update the NN if curr_euclid_dist<min_euclid_dist: min_euclid_dist = curr_euclid_dist min_house_index = i ``` *** QUIZ QUESTION *** Among the first 10 training houses, which house is the closest to the query house? ``` print 'House', min_house_index + 1 ``` It is computationally inefficient to loop over computing distances to all houses in our training dataset. Fortunately, many of the Numpy functions can be **vectorized**, applying the same operation over multiple values or vectors. We now walk through this process. Consider the following loop that computes the element-wise difference between the features of the query house (`features_test[0]`) and the first 3 training houses (`features_train[0:3]`): ``` for i in xrange(3): print features_train[i]-features_test[0] # should print 3 vectors of length 18 ``` The subtraction operator (`-`) in Numpy is vectorized as follows: ``` print features_train[0:3] - features_test[0] ``` Note that the output of this vectorized operation is identical to that of the loop above, which can be verified below: ``` # verify that vectorization works results = features_train[0:3] - features_test[0] print results[0] - (features_train[0]-features_test[0]) # should print all 0's if results[0] == (features_train[0]-features_test[0]) print results[1] - (features_train[1]-features_test[0]) # should print all 0's if results[1] == (features_train[1]-features_test[0]) print results[2] - (features_train[2]-features_test[0]) # should print all 0's if results[2] == (features_train[2]-features_test[0]) ``` Aside: it is a good idea to write tests like this cell whenever you are vectorizing a complicated operation. ## Perform 1-nearest neighbor regression Now that we have the element-wise differences, it is not too hard to compute the Euclidean distances between our query house and all of the training houses. First, write a single-line expression to define a variable `diff` such that `diff[i]` gives the element-wise difference between the features of the query house and the `i`-th training house. ``` diff = features_train[:] - features_test[0] ``` To test the code above, run the following cell, which should output a value -0.0934339605842: ``` print diff[-1].sum() # sum of the feature differences between the query and last training house # should print -0.0934339605842 ``` The next step in computing the Euclidean distances is to take these feature-by-feature differences in `diff`, square each, and take the sum over feature indices. That is, compute the sum of square feature differences for each training house (row in `diff`). By default, `np.sum` sums up everything in the matrix and returns a single number. 
To instead sum only over a row or column, we need to specify the `axis` parameter described in the `np.sum` [documentation](http://docs.scipy.org/doc/numpy-1.10.1/reference/generated/numpy.sum.html). In particular, `axis=1` computes the sum across each row.

Below, we compute this sum of square feature differences for all training houses and verify that the output for the 16th house in the training set is equivalent to having examined only the 16th row of `diff` and computing the sum of squares on that row alone.

```
print np.sum(diff**2, axis=1)[15] # take sum of squares across each row, and print the 16th sum
print np.sum(diff[15]**2) # print the sum of squares for the 16th row -- should be same as above
```

With this result in mind, write a single-line expression to compute the Euclidean distances between the query house and all houses in the training set. Assign the result to a variable `distances`.

**Hint**: Do not forget to take the square root of the sum of squares.

```
distances = np.sqrt( np.sum(diff**2, axis=1) )
```

To test the code above, run the following cell, which should output a value 0.0237082324496:

```
print distances[100] # Euclidean distance between the query house and the 101st training house
                     # should print 0.0237082324496
```

Now you are ready to write a function that computes the distances from a query house to all training houses. The function should take two parameters: (i) the matrix of training features and (ii) the single feature vector associated with the query.

```
def compute_distances(features_instances, features_query):
    diff = features_instances[:] - features_query
    distances = np.sqrt( np.sum(diff**2, axis=1) )
    return distances
```

*** QUIZ QUESTIONS ***

Q1. Take the query house to be the third house of the test set (`features_test[2]`). What is the index of the house in the training set that is closest to this query house?

```
dist_Q1 = compute_distances(features_train, features_test[2])
index_NN = np.where(dist_Q1 == dist_Q1.min())[0][0]
print index_NN
```

Q2. What is the predicted value of the query house based on 1-nearest neighbor regression?

```
print train_data['price'][index_NN]
```

# Perform k-nearest neighbor regression

For k-nearest neighbors, we need to find a *set* of k houses in the training set closest to a given query house. We then make predictions based on these k nearest neighbors.

## Fetch k-nearest neighbors

Using the functions above, implement a function that takes in
* the value of k;
* the feature matrix for the training houses; and
* the feature vector of the query house

and returns the indices of the k closest training houses. For instance, with 2-nearest neighbors, a return value of [5, 10] would indicate that the 6th and 11th training houses are closest to the query house.

**Hint**: Look at the [documentation for `np.argsort`](http://docs.scipy.org/doc/numpy/reference/generated/numpy.argsort.html).

```
def k_nearest_neighbors(k, feature_train, features_query):
    distances = compute_distances(feature_train, features_query)
    neighbors = np.argsort(distances)[0:k]
    return neighbors
```

*** QUIZ QUESTION ***

Take the query house to be the third house of the test set (`features_test[2]`). What are the indices of the 4 training houses closest to the query house?

```
QQ_4NN = k_nearest_neighbors(4, features_train, features_test[2])
print QQ_4NN
```

## Make a single prediction by averaging k nearest neighbor outputs

Now that we know how to find the k-nearest neighbors, write a function that predicts the value of a given query house.
**For simplicity, take the average of the prices of the k nearest neighbors in the training set**. The function should have the following parameters:
* the value of k;
* the feature matrix for the training houses;
* the output values (prices) of the training houses; and
* the feature vector of the query house, whose price we are predicting.

The function should return a predicted value of the query house.

**Hint**: You can extract multiple items from a Numpy array using a list of indices. For instance, `output_train[[6, 10]]` returns the prices of the 7th and 11th training houses.

```
def predict_output_of_query(k, features_train, output_train, features_query):
    kNN = k_nearest_neighbors(k, features_train, features_query)
    prediction = np.average(output_train[kNN])
    return prediction
```

*** QUIZ QUESTION ***

Again taking the query house to be the third house of the test set (`features_test[2]`), predict the value of the query house using k-nearest neighbors with `k=4` and the simple averaging method described and implemented above.

```
QQ_pred = predict_output_of_query(4, features_train, train_data['price'].values, features_test[2])
print QQ_pred
```

Compare this predicted value using 4-nearest neighbors to the predicted value using 1-nearest neighbor computed earlier.

```
print '1-NN prediction: ', train_data['price'][382]
print '4-NN prediction: ', QQ_pred
```

## Make multiple predictions

Write a function to predict the value of *each and every* house in a query set. (The query set can be any subset of the dataset, be it the test set or validation set.) The idea is to have a loop where we take each house in the query set as the query house and make a prediction for that specific house. The new function should take the following parameters:
* the value of k;
* the feature matrix for the training houses;
* the output values (prices) of the training houses; and
* the feature matrix for the query set.

The function should return a set of predicted values, one for each house in the query set.

**Hint**: To get the number of houses in the query set, use the `.shape` field of the query features matrix. See [the documentation](http://docs.scipy.org/doc/numpy-1.10.1/reference/generated/numpy.ndarray.shape.html).

```
def predict_output(k, features_train, output_train, features_query):
    predictions = np.zeros(features_query.shape[0])
    for i in range(len(predictions)):
        predictions[i] = predict_output_of_query(k, features_train, output_train, features_query[i])
    return predictions
```

*** QUIZ QUESTION ***

Make predictions for the first 10 houses in the test set using k-nearest neighbors with `k=10`.

Q1. What is the index of the house in this query set that has the lowest predicted value?

```
QQ_10_preds = predict_output(10, features_train, train_data['price'].values, features_test[0:10])
index_low_pred = np.where(QQ_10_preds == QQ_10_preds.min())[0][0]
print index_low_pred
```

Q2. What is the predicted value of this house?

```
print QQ_10_preds[index_low_pred]
```

## Choosing the best value of k using a validation set

There remains a question of choosing the value of k to use in making predictions. Here, we use a validation set to choose this value. Write a loop that does the following:
* For `k` in [1, 2, ..., 15]:
    * Makes predictions for each house in the VALIDATION set using the k-nearest neighbors from the TRAINING set.
    * Computes the RSS for these predictions on the VALIDATION set
    * Stores the RSS computed above in `rss_all`
* Report which `k` produced the lowest RSS on the VALIDATION set.
(Depending on your computing environment, this computation may take 10-15 minutes.)

```
kvals = range(1, 16)
rss_all = np.zeros(len(kvals))
for i in range(len(kvals)):
    pred_vals = predict_output(kvals[i], features_train, train_data['price'].values, features_valid)
    rss_all[i] = sum( (pred_vals - validation_data['price'].values)**2 )
index_min_rss = np.where(rss_all == rss_all.min())[0][0]
print 'Value of k which produces the lowest RSS on VALIDATION set: ', kvals[index_min_rss]
```

To visualize the performance as a function of `k`, plot the RSS on the VALIDATION set for each considered `k` value:

```
plt.figure(figsize=(8,6))
plt.plot(kvals, rss_all, 'bo-')
plt.xlabel('k-nearest neighbors used', fontsize=16)
plt.ylabel('Residual Sum of Squares', fontsize=16)
plt.title('k vs. RSS on Validation Dataset', fontsize=18)
plt.show()
```

*** QUIZ QUESTION ***

What is the RSS on the TEST data using the value of k found above? To be clear, sum over all houses in the TEST set.

```
pred_vals_test = predict_output(8, features_train, train_data['price'].values, features_test)
rss_test = sum( (pred_vals_test - test_data['price'].values)**2 )
print '%.2e' % rss_test
```
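As a cross-check on the hand-rolled implementation above, the same kind of prediction can be reproduced with scikit-learn's `KNeighborsRegressor` (a minimal sketch, assuming `features_train`, `features_valid`, `train_data` and `validation_data` are defined as above; this is not part of the original assignment):

```
from sklearn.neighbors import KNeighborsRegressor

# uniform weights = simple averaging of the k neighbors, matching predict_output above
knn = KNeighborsRegressor(n_neighbors=8, weights='uniform', metric='euclidean')
knn.fit(features_train, train_data['price'].values)

pred_vals_valid = knn.predict(features_valid)
rss_valid = sum( (pred_vals_valid - validation_data['price'].values)**2 )
print rss_valid  # should closely match rss_all[7] (k=8) above, up to distance ties
```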
# Data 512 Final Project Proposal

Xiaolu Qian <br>11/5/2020

### Motivation and problem statement

According to United Nations’ statistics, more than half of the world’s population is living in urban regions, and urbanization keeps spreading throughout the world. Regional opportunity structures attract a lot of investigations of the urban labor market at a micro level. The high income level and fast income growth of workers in populated regions contrast with the wage level of workers in rural areas and less dense places. A vast literature studies the urban wage premium, particularly in the U.S. context. China led the global urbanization of the past few decades and, according to the World Bank, is expected to have 70% of its population living in urban areas by the year 2030. Although China has been going through urbanization at such a dramatic scale, there is currently not a lot of literature on the urban wage premium in China. The goal of this project is to investigate China's urban wage premium with a special focus on its provincial capitals. My goal is to determine whether there exist urban/provincial-capital agglomeration effects in China that contribute to the wage premium, accounting for the cost of living and the unobservable ability bias of workers. I also want to examine the wage gap between male and female workers in urban and rural settings.

### Data selected for analysis

I will be using data from the China Health and Nutrition Survey (CHNS) https://www.cpc.unc.edu/projects/china, a nationally representative sample of the Chinese population running from 1997 to 2015. While access to Chinese government population statistics is largely restricted, the CHNS covers 15 representative provinces that vary substantially in demography, economics, and public resources. It used a multistage, random cluster process in drawing sample statistics from each province. The overall survey contains about 7,200 households and covers roughly 30,000 individuals, with residential location types classified as urban neighborhoods, rural villages, county town neighborhoods, and suburban villages. In urban areas, the provincial capital and a lower-income city were selected randomly if possible. In rural areas, counties were stratified by income, and a weighted scheme was used to randomly select four counties within each province. The data include detailed household and individual economic and social information. This survey data is gathered by UNC and is publicly available for use. According to the survey website, “the survey was conducted by an international team of researchers whose backgrounds include nutrition, public health, economics, sociology, Chinese studies, and demography. The survey took place over a 7-day period using a multistage, random cluster process to draw a sample of about 7,200 households with over 30,000 individuals in 15 provinces and municipal cities that vary substantially in geography, economic development, public resources, and health indicators. In addition, detailed community data were collected in surveys of food markets, health facilities, family planning officials, and other social services and community leaders.” There are no ethical concerns with using this dataset.

### Unknowns and dependencies

It might be difficult to find the right method for identifying the causal relationship behind the urban wage premium. Even though follow-up levels in this dataset are high, families that migrate from one community to a new one are not recorded, as movers exit the sample. The CHNS does not provide floating-population information, since it stopped keeping track of people once they moved. Conducting an OLS regression that controls for the different characteristics that might cause biases would also give a good prediction of the urban wage premium in provincial capitals in China.
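As a rough illustration of that last point, a minimal sketch of what such an OLS specification could look like with `statsmodels` (the DataFrame `chns` and the column names `log_wage`, `urban`, `provincial_capital`, `age`, `education`, `gender` are hypothetical placeholders, not actual CHNS variable names):

```
import statsmodels.formula.api as smf

# Hypothetical wage regression: log wage on urban / provincial-capital indicators plus controls
# (all column names below are placeholders; the real CHNS variables differ)
model = smf.ols('log_wage ~ urban + provincial_capital + age + I(age**2) + education + C(gender)',
                data=chns)
result = model.fit(cov_type='HC1')  # heteroskedasticity-robust standard errors
print(result.summary())
```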
## Dependencies ``` from openvaccine_scripts import * import warnings, json from sklearn.model_selection import KFold, StratifiedKFold import tensorflow.keras.layers as L import tensorflow.keras.backend as K from tensorflow.keras import optimizers, losses, Model from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau SEED = 0 seed_everything(SEED) warnings.filterwarnings('ignore') ``` # Model parameters ``` config = { "BATCH_SIZE": 64, "EPOCHS": 120, "LEARNING_RATE": 1e-3, "ES_PATIENCE": 10, "N_FOLDS": 5, "N_USED_FOLDS": 5, "PB_SEQ_LEN": 107, "PV_SEQ_LEN": 130, } with open('config.json', 'w') as json_file: json.dump(json.loads(json.dumps(config)), json_file) config ``` # Load data ``` database_base_path = '/kaggle/input/stanford-covid-vaccine/' train = pd.read_json(database_base_path + 'train.json', lines=True) test = pd.read_json(database_base_path + 'test.json', lines=True) print('Train samples: %d' % len(train)) display(train.head()) print(f'Test samples: {len(test)}') display(test.head()) ``` ## Auxiliary functions ``` def get_dataset(x, y=None, sample_weights=None, labeled=True, shuffled=True, batch_size=32, buffer_size=-1, seed=0): input_map = {'inputs_seq': x['sequence'], 'inputs_struct': x['structure'], 'inputs_loop': x['predicted_loop_type'], 'inputs_bpps_max': x['bpps_max'], 'inputs_bpps_sum': x['bpps_sum'], 'inputs_bpps_mean': x['bpps_mean'], 'inputs_bpps_scaled': x['bpps_scaled']} if labeled: output_map = {'output_react': y['reactivity'], 'output_bg_ph': y['deg_Mg_pH10'], 'output_ph': y['deg_pH10'], 'output_mg_c': y['deg_Mg_50C'], 'output_c': y['deg_50C']} if sample_weights is not None: dataset = tf.data.Dataset.from_tensor_slices((input_map, output_map, sample_weights)) else: dataset = tf.data.Dataset.from_tensor_slices((input_map, output_map)) else: dataset = tf.data.Dataset.from_tensor_slices((input_map)) if shuffled: dataset = dataset.shuffle(2048, seed=seed) dataset = dataset.batch(batch_size) dataset = dataset.prefetch(buffer_size) return dataset ``` # Model ``` def model_fn(embed_dim=100, hidden_dim=384, dropout=.5, pred_len=68, n_outputs=5): inputs_seq = L.Input(shape=(None, 1), name='inputs_seq') inputs_struct = L.Input(shape=(None, 1), name='inputs_struct') inputs_loop = L.Input(shape=(None, 1), name='inputs_loop') inputs_bpps_max = L.Input(shape=(None, 1), name='inputs_bpps_max') inputs_bpps_sum = L.Input(shape=(None, 1), name='inputs_bpps_sum') inputs_bpps_mean = L.Input(shape=(None, 1), name='inputs_bpps_mean') inputs_bpps_scaled = L.Input(shape=(None, 1), name='inputs_bpps_scaled') shared_embed = L.Embedding(input_dim=len(token2int), output_dim=embed_dim, name='shared_embedding') embed_seq = shared_embed(inputs_seq) embed_struct = shared_embed(inputs_struct) embed_loop = shared_embed(inputs_loop) embed_concat = L.concatenate([embed_seq, embed_struct, embed_loop], axis=2, name='embedding_concatenate') embed_reshaped = L.Reshape((-1, embed_concat.shape[2]*embed_concat.shape[3]))(embed_concat) x_concat = L.concatenate([embed_reshaped, inputs_bpps_max, inputs_bpps_sum, inputs_bpps_mean, inputs_bpps_scaled], axis=-1, name='features_concatenate') # Recurrent block x = L.Bidirectional(L.GRU(hidden_dim, dropout=dropout, return_sequences=True, kernel_initializer='orthogonal'))(x_concat) x_rec = L.Bidirectional(L.GRU(hidden_dim, dropout=dropout, return_sequences=True, kernel_initializer='orthogonal'))(x) x = L.Add()([x_rec, x]) x_rec = L.Bidirectional(L.GRU(hidden_dim, dropout=dropout, return_sequences=True, kernel_initializer='orthogonal'))(x) x = 
L.Add()([x_rec, x]) # Since we are only making predictions on the first part of each sequence, we have to truncate it x_truncated = x[:, :pred_len] output_react = L.Dense(1, activation='linear', name='output_react')(x_truncated) output_bg_ph = L.Dense(1, activation='linear', name='output_bg_ph')(x_truncated) output_ph = L.Dense(1, activation='linear', name='output_ph')(x_truncated) output_mg_c = L.Dense(1, activation='linear', name='output_mg_c')(x_truncated) output_c = L.Dense(1, activation='linear', name='output_c')(x_truncated) model = Model(inputs=[inputs_seq, inputs_struct, inputs_loop, inputs_bpps_max, inputs_bpps_sum, inputs_bpps_mean, inputs_bpps_scaled], outputs=[output_react, output_bg_ph, output_ph, output_mg_c, output_c]) opt = optimizers.Adam(learning_rate=config['LEARNING_RATE']) model.compile(optimizer=opt, loss={'output_react': MCRMSE, 'output_bg_ph': MCRMSE, 'output_ph': MCRMSE, 'output_mg_c': MCRMSE, 'output_c': MCRMSE}, loss_weights={'output_react': 2., 'output_bg_ph': 2., 'output_ph': 1., 'output_mg_c': 2., 'output_c': 1.}) return model model = model_fn() model.summary() ``` # Pre-process ``` # Add bpps as features bpps_max = [] bpps_sum = [] bpps_mean = [] bpps_scaled = [] bpps_nb_mean = 0.077522 # mean of bpps_nb across all training data bpps_nb_std = 0.08914 # std of bpps_nb across all training data for row in train.itertuples(): probability = np.load(f'{database_base_path}/bpps/{row.id}.npy') bpps_max.append(probability.max(-1).tolist()) bpps_sum.append((1-probability.sum(-1)).tolist()) bpps_mean.append((1-probability.mean(-1)).tolist()) # bpps nb bpps_nb = (probability > 0).sum(axis=0) / probability.shape[0] bpps_nb = (bpps_nb - bpps_nb_mean) / bpps_nb_std bpps_scaled.append(bpps_nb) train = train.assign(bpps_max=bpps_max, bpps_sum=bpps_sum, bpps_mean=bpps_mean, bpps_scaled=bpps_scaled) bpps_max = [] bpps_sum = [] bpps_mean = [] bpps_scaled = [] for row in test.itertuples(): probability = np.load(f'{database_base_path}/bpps/{row.id}.npy') bpps_max.append(probability.max(-1).tolist()) bpps_sum.append((1-probability.sum(-1)).tolist()) bpps_mean.append((1-probability.mean(-1)).tolist()) # bpps nb bpps_nb = (probability > 0).sum(axis=0) / probability.shape[0] bpps_nb = (bpps_nb - bpps_nb_mean) / bpps_nb_std bpps_scaled.append(bpps_nb) test = test.assign(bpps_max=bpps_max, bpps_sum=bpps_sum, bpps_mean=bpps_mean, bpps_scaled=bpps_scaled) feature_cols = ['sequence', 'structure', 'predicted_loop_type', 'bpps_max', 'bpps_sum', 'bpps_mean', 'bpps_scaled'] pred_cols = ['reactivity', 'deg_Mg_pH10', 'deg_pH10', 'deg_Mg_50C', 'deg_50C'] encoder_list = [token2int_seq, token2int_struct, token2int_loop, None, None, None, None] public_test = test.query("seq_length == 107").copy() private_test = test.query("seq_length == 130").copy() x_test_public = get_features_dict(public_test, feature_cols, encoder_list, public_test.index) x_test_private = get_features_dict(private_test, feature_cols, encoder_list, private_test.index) # To use as stratified col train['signal_to_noise_int'] = train['signal_to_noise'].astype(int) ``` # Training ``` AUTO = tf.data.experimental.AUTOTUNE skf = KFold(n_splits=config['N_USED_FOLDS'], shuffle=True, random_state=SEED) history_list = [] oof = train[['id', 'SN_filter', 'signal_to_noise'] + pred_cols].copy() oof_preds = np.zeros((len(train), 68, len(pred_cols))) test_public_preds = np.zeros((len(public_test), config['PB_SEQ_LEN'], len(pred_cols))) test_private_preds = np.zeros((len(private_test), config['PV_SEQ_LEN'], len(pred_cols))) for fold,(train_idx, 
valid_idx) in enumerate(skf.split(train['signal_to_noise_int'])): if fold >= config['N_USED_FOLDS']: break print(f'\nFOLD: {fold+1}') ### Create datasets x_train = get_features_dict(train, feature_cols, encoder_list, train_idx) x_valid = get_features_dict(train, feature_cols, encoder_list, valid_idx) y_train = get_targets_dict(train, pred_cols, train_idx) y_valid = get_targets_dict(train, pred_cols, valid_idx) w_train = np.log(train.iloc[train_idx]['signal_to_noise'].values+1.2)+1 w_valid = np.log(train.iloc[valid_idx]['signal_to_noise'].values+1.2)+1 train_ds = get_dataset(x_train, y_train, w_train, labeled=True, shuffled=True, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, seed=SEED) valid_ds = get_dataset(x_valid, y_valid, w_valid, labeled=True, shuffled=False, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, seed=SEED) oof_ds = get_dataset(get_features_dict(train, feature_cols, encoder_list, valid_idx), labeled=False, shuffled=False, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, seed=SEED) test_public_ds = get_dataset(x_test_public, labeled=False, shuffled=False, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, seed=SEED) test_private_ds = get_dataset(x_test_private, labeled=False, shuffled=False, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, seed=SEED) ### Model K.clear_session() model = model_fn() model_path = f'model_{fold}.h5' es = EarlyStopping(monitor='val_loss', mode='min', patience=config['ES_PATIENCE'], restore_best_weights=True, verbose=1) rlrp = ReduceLROnPlateau(monitor='val_loss', mode='min', factor=0.1, patience=5, verbose=1) ### Train history = model.fit(train_ds, validation_data=valid_ds, callbacks=[es, rlrp], epochs=config['EPOCHS'], batch_size=config['BATCH_SIZE'], verbose=2).history history_list.append(history) # Save last model weights model.save_weights(model_path) ### Inference oof_ds_preds = np.array(model.predict(oof_ds)).reshape((len(pred_cols), len(valid_idx), 68)).transpose((1, 2, 0)) oof_preds[valid_idx] = oof_ds_preds # Short sequence (public test) model = model_fn(pred_len=config['PB_SEQ_LEN']) model.load_weights(model_path) test_public_ds_preds = np.array(model.predict(test_public_ds)).reshape((len(pred_cols), len(public_test), config['PB_SEQ_LEN'])).transpose((1, 2, 0)) test_public_preds += test_public_ds_preds * (1 / config['N_USED_FOLDS']) # Long sequence (private test) model = model_fn(pred_len=config['PV_SEQ_LEN']) model.load_weights(model_path) test_private_ds_preds = np.array(model.predict(test_private_ds)).reshape((len(pred_cols), len(private_test), config['PV_SEQ_LEN'])).transpose((1, 2, 0)) test_private_preds += test_private_ds_preds * (1 / config['N_USED_FOLDS']) ``` ## Model loss graph ``` for fold, history in enumerate(history_list): print(f'\nFOLD: {fold+1}') print(f"Train {np.array(history['loss']).min():.5f} Validation {np.array(history['val_loss']).min():.5f}") plot_metrics_agg(history_list) ``` # Post-processing ``` # Assign preds to OOF set for idx, col in enumerate(pred_cols): val = oof_preds[:, :, idx] oof = oof.assign(**{f'{col}_pred': list(val)}) oof.to_csv('oof.csv', index=False) oof_preds_dict = {} for col in pred_cols: oof_preds_dict[col] = oof_preds[:, :, idx] # Assign values to test set preds_ls = [] for df, preds in [(public_test, test_public_preds), (private_test, test_private_preds)]: for i, uid in enumerate(df.id): single_pred = preds[i] single_df = pd.DataFrame(single_pred, columns=pred_cols) single_df['id_seqpos'] = [f'{uid}_{x}' for x in range(single_df.shape[0])] preds_ls.append(single_df) preds_df = 
pd.concat(preds_ls) ``` # Model evaluation ``` y_true_dict = get_targets_dict(train, pred_cols, train.index) y_true = np.array([y_true_dict[col] for col in pred_cols]).transpose((1, 2, 0, 3)).reshape(oof_preds.shape) display(evaluate_model(train, y_true, oof_preds, pred_cols)) ``` # Visualize test predictions ``` submission = pd.read_csv(database_base_path + 'sample_submission.csv') submission = submission[['id_seqpos']].merge(preds_df, on=['id_seqpos']) ``` # Test set predictions ``` display(submission.head(10)) display(submission.describe()) submission.to_csv('submission.csv', index=False) ```
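A note on the `MCRMSE` loss passed to `model.compile` above: it is imported from `openvaccine_scripts` and is not defined in this notebook. A minimal sketch of a mean column-wise RMSE in TensorFlow, assuming the helper follows the competition's usual definition (the imported function may differ in details):

```
import tensorflow as tf

def mcrmse_sketch(y_true, y_pred):
    # mean squared error over sequence positions, then RMSE averaged over target columns
    # (illustrative sketch only; not necessarily identical to openvaccine_scripts.MCRMSE)
    colwise_mse = tf.reduce_mean(tf.square(y_true - y_pred), axis=1)
    return tf.reduce_mean(tf.sqrt(colwise_mse), axis=-1)
```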
# PyData.Tokyo Tutorial Part 2: "Machine Learning"

* https://github.com/PyDataTokyo/pydata-tokyo-tutorial-1/blob/master/pydatatokyo_tutorial_ml.ipynb
* https://pydata.tokyo/ipynb/tutorial-1/ml.html
* Building a classification model with machine learning
* Evaluating the classification results

```
from IPython.display import Image
Image(url='http://graphics8.nytimes.com/images/section/learning/general/onthisday/big/0415_big.gif')
```

## 2. Importing libraries and preparing the data

```
%matplotlib inline

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from sklearn.model_selection import train_test_split, cross_val_score, KFold
from sklearn.preprocessing import LabelEncoder
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.grid_search import GridSearchCV
from IPython.display import Image

# Tweak a pandas option
# http://pandas.pydata.org/pandas-docs/stable/generated/pandas.set_option.html
pd.set_option('chained_assignment', None)

# Set the matplotlib style
plt.style.use('ggplot')
plt.rc('xtick.major', size=0)
plt.rc('ytick.major', size=0)

df_train = pd.read_csv('train.csv')
df_test = pd.read_csv('test.csv')

df_train.tail()
df_test.tail()
```

## 3. Predicting survivors with the gender model and evaluating the predictions

```
# Get sex and survival status
x = df_train['Sex']
y = df_train['Survived']

# Predict from sex alone (women survive, men die)
y_pred = x.map({'female': 1, 'male': 0}).astype(int)

# Show the accuracy
print('Accuracy: {:.3f}'.format(accuracy_score(y, y_pred)))

# Compute precision, recall and F1-score with classification_report
print(classification_report(y, y_pred))

# Show the confusion matrix
cm = confusion_matrix(y, y_pred)
print(cm)

# Plot the confusion matrix
# true positive   false negative
# false positive  true negative
def plot_confusion_matrix(cm):
    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
    ax.set_title('Confusion Matrix')
    fig.colorbar(im)
    target_names = ['not survived', 'survived']
    tick_marks = np.arange(len(target_names))
    ax.set_xticks(tick_marks)
    ax.set_xticklabels(target_names, rotation=45)
    ax.set_yticks(tick_marks)
    ax.set_yticklabels(target_names)
    ax.set_ylabel('True label')
    ax.set_xlabel('Predicted label')
    fig.tight_layout()

plot_confusion_matrix(cm)

# As with the training data, predict survivors for the test data submitted to Kaggle.
x_test = df_test['Sex']
y_test_pred = x_test.map({'female': 1, 'male': 0}).astype(int)

# Generate the submission file for Kaggle
df_kaggle = pd.DataFrame({'PassengerId': df_test['PassengerId'], 'Survived': np.array(y_test_pred)})
df_kaggle.to_csv('kaggle_gendermodel.csv', index=False)
df_kaggle.head()
```

## 4. Predicting survivors with logistic regression
```
# Use Age, Pclass and Sex
X = df_train[['Age', 'Pclass', 'Sex']]
y = df_train['Survived']
X.tail()

# Fill missing ages with the mean age
X['AgeFill'] = X['Age'].fillna(X['Age'].mean())
X = X.drop(['Age'], axis=1)
# The following does the same thing:
# X['Age'] = X['Age'].fillna(X['Age'].mean())

# Encode sex as a number
# (there are only two values, so 0 and 1 are used)
X['Gender'] = X['Sex'].map({'female': 0, 'male': 1}).astype(int)
X.tail()

# Create a new feature (Pclass_Gender) for the hypothesis that women (Gender=0)
# in a higher passenger class (Pclass=1) have a higher survival rate
# The smaller Pclass_Gender is, the higher the survival rate
X['Pclass_Gender'] = X['Pclass'] + X['Gender']
X.tail()

# This time only Pclass_Gender and Age are used as features, so drop the unneeded columns
X = X.drop(['Pclass', 'Sex', 'Gender'], axis=1)
X.tail()

# Plot age on the x-axis and Pclass_Gender on the y-axis
np.random.seed(0)
xmin, xmax = -5, 85
ymin, ymax = 0.5, 4.5
index_survived = y[y==0].index
index_notsurvived = y[y==1].index

fig, ax = plt.subplots()
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
sc = ax.scatter(X.loc[index_survived, 'AgeFill'],
                X.loc[index_survived, 'Pclass_Gender'] + (np.random.rand(len(index_survived))-0.5)*0.1,
                color='r', label='Not Survived', alpha=0.3)
sc = ax.scatter(X.loc[index_notsurvived, 'AgeFill'],
                X.loc[index_notsurvived, 'Pclass_Gender'] + (np.random.rand(len(index_notsurvived))-0.5)*0.1,
                color='b', label='Survived', alpha=0.3)
ax.set_xlabel('AgeFill')
ax.set_ylabel('Pclass_Gender')
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.legend(bbox_to_anchor=(1.4, 1.03))
plt.show()
```

### Splitting the training data

```
# Split into 80% training data and 20% validation data
X_train, X_val, y_train, y_val = train_test_split(X, y, train_size=0.8, random_state=1)
print('Num of Training Samples: {}'.format(len(X_train)))
print('Num of Validation Samples: {}'.format(len(X_val)))
```

### Prediction with logistic regression

```
clf = LogisticRegression()
clf.fit(X_train, y_train)
y_train_pred = clf.predict(X_train)
y_val_pred = clf.predict(X_val)

# Evaluate the results
print('Accuracy on Training Set: {:.3f}'.format(accuracy_score(y_train, y_train_pred)))
print('Accuracy on Validation Set: {:.3f}'.format(accuracy_score(y_val, y_val_pred)))

# Show the confusion matrix
cm = confusion_matrix(y_val, y_val_pred)
print(cm)
plot_confusion_matrix(cm)
print(classification_report(y_val, y_val_pred))

h = 0.02
xmin, xmax = -5, 85
ymin, ymax = 0.5, 4.5
xx, yy = np.meshgrid(np.arange(xmin, xmax, h), np.arange(ymin, ymax, h))
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
Z = Z.reshape(xx.shape)

fig, ax = plt.subplots()
levels = np.linspace(0, 1.0, 5)
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
contour = ax.contourf(xx, yy, Z, cmap=cm, levels=levels, alpha=0.8)
ax.scatter(X_train.iloc[:, 0], X_train.iloc[:, 1]+(np.random.rand(len(X_train))-0.5)*0.1, c=y_train, cmap=cm_bright)
ax.scatter(X_val.iloc[:, 0], X_val.iloc[:, 1]+(np.random.rand(len(X_val))-0.5)*0.1, c=y_val, cmap=cm_bright, alpha=0.5)
ax.set_xlabel('AgeFill')
ax.set_ylabel('Pclass_Gender')
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
fig.colorbar(contour)
x1 = xmin
x2 = xmax
y1 = -1*(clf.intercept_[0]+clf.coef_[0][0]*xmin)/clf.coef_[0][1]
y2 = -1*(clf.intercept_[0]+clf.coef_[0][0]*xmax)/clf.coef_[0][1]
ax.plot([x1, x2], [y1, y2], 'k--')
plt.show()

# Try a support vector machine
clf_svc_lin = SVC(kernel='linear', probability=True)
clf_svc_lin.fit(X_train, y_train)
y_train_svc_pred = clf_svc_lin.predict(X_train)
y_val_svc_pred = clf_svc_lin.predict(X_val)

print('Accuracy on Training Set: {:.3f}'.format(accuracy_score(y_train, y_train_svc_pred)))
print('Accuracy on Validation Set: {:.3f}'.format(accuracy_score(y_val, y_val_svc_pred)))
cm_svc = confusion_matrix(y_val, y_val_svc_pred)
print(cm_svc)
plot_confusion_matrix(cm_svc)
print(classification_report(y_val, y_val_svc_pred))
```

## 5. Cross-validation
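The entry breaks off at this heading. A minimal sketch of how a cross-validation step could look, using the `cross_val_score` and `KFold` imports from section 2 and the `X`, `y` features built above (a hypothetical continuation for illustration, not the original tutorial code):

```
# 10-fold cross-validation of the logistic regression model on AgeFill and Pclass_Gender
kf = KFold(n_splits=10, shuffle=True, random_state=0)
scores = cross_val_score(LogisticRegression(), X, y, cv=kf, scoring='accuracy')
print('Cross-validation accuracy: {:.3f} +/- {:.3f}'.format(scores.mean(), scores.std()))
```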
<!--NAVIGATION--> < [机器学习](05.00-Machine-Learning.ipynb) | [目录](Index.ipynb) | [Scikit-Learn简介](05.02-Introducing-Scikit-Learn.ipynb) > <a href="https://colab.research.google.com/github/wangyingsm/Python-Data-Science-Handbook/blob/master/notebooks/05.01-What-Is-Machine-Learning.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a> # What Is Machine Learning? # 什么是机器学习? > Before we take a look at the details of various machine learning methods, let's start by looking at what machine learning is, and what it isn't. Machine learning is often categorized as a subfield of artificial intelligence, but I find that categorization can often be misleading at first brush. The study of machine learning certainly arose from research in this context, but in the data science application of machine learning methods, it's more helpful to think of machine learning as a means of *building models of data*. 在我们开始学习机器学习方法的细节之前,让我们先来了解机器学习是什么以及不是什么。机器学习经常被归为人工智能的一个子领域,但作者发现这种分类方式常常一开始就导致了误解。对机器学习的研究肯定是在这个环境中发展出来的,但是机器学习方法在数据科学应用中,它更适合被看成是*数据的构造模型*。 > Fundamentally, machine learning involves building mathematical models to help understand data. "Learning" enters the fray when we give these models *tunable parameters* that can be adapted to observed data; in this way the program can be considered to be "learning" from the data. Once these models have been fit to previously seen data, they can be used to predict and understand aspects of newly observed data. I'll leave to the reader the more philosophical digression regarding the extent to which this type of mathematical, model-based "learning" is similar to the "learning" exhibited by the human brain. 机器学习基本上就是关于构建数学模型来帮助我们理解数据。当我们为这些模型提供了*可调整的参数*时,“学习”能让我们从观察到的数据中调整这些参数。也就是说,这个过程可以被认为我们从数据中“学习”。一旦这些模型已经适应(拟合)了观察到的数据之后,它们就可以用来预测和理解新的数据。作者把这个问题的哲学思考留给读者,基于模型的“学习”确实与人脑展示的“学习”类似。 > Understanding the problem setting in machine learning is essential to using these tools effectively, and so we will start with some broad categorizations of the types of approaches we'll discuss here. 理解机器学习中的各种概念是有效使用这些工具的基础,因此我们首先介绍机器学习的分类以及方法的类型。 ## Categories of Machine Learning ## 机器学习分类 > At the most fundamental level, machine learning can be categorized into two main types: supervised learning and unsupervised learning. 在最基础的层次上,机器学习可以被分为两大类:有监督学习和无监督学习。 > *Supervised learning* involves somehow modeling the relationship between measured features of data and some label associated with the data; once this model is determined, it can be used to apply labels to new, unknown data. This is further subdivided into *classification* tasks and *regression* tasks: in classification, the labels are discrete categories, while in regression, the labels are continuous quantities. We will see examples of both types of supervised learning in the following section. *有监督学习*指的是在除了数据本身外,我们还拥有对数据进行的标记,有监督学习就是要建立两者之间的联系模型,然后这个模型就可以应用在新的数据上进行标记。它可以进一步分为*分类*和*回归*任务:在分类中,标记的是离散的分组,而在回归中,标记的是连续的量。我们在后续章节中会看到这两种有监督学习的例子。 > *Unsupervised learning* involves modeling the features of a dataset without reference to any label, and is often described as "letting the dataset speak for itself." These models include tasks such as *clustering* and *dimensionality reduction.* Clustering algorithms identify distinct groups of data, while dimensionality reduction algorithms search for more succinct representations of the data. We will see examples of both types of unsupervised learning in the following section. 
*无监督学习*是从没有标记的数据中建立模型,它常被描述为“让数据集自己说话”。这样的模型包括*聚类*和*降维*。聚类算法能识别数据中的分组,而降维算法寻找数据更简洁的表达形式。我们在后续章节中会看到这两种无监督学习的例子。 > In addition, there are so-called *semi-supervised learning* methods, which falls somewhere between supervised learning and unsupervised learning. Semi-supervised learning methods are often useful when only incomplete labels are available. 除此之外,还有一种被成为*半监督学习*的方法,介于有监督学习和无监督学习之间。半监督学习方法经常应用在不完整的数据标记的场合中。 ## Qualitative Examples of Machine Learning Applications ## 机器学习应用的定性例子 > To make these ideas more concrete, let's take a look at a few very simple examples of a machine learning task. These examples are meant to give an intuitive, non-quantitative overview of the types of machine learning tasks we will be looking at in this chapter. In later sections, we will go into more depth regarding the particular models and how they are used. For a preview of these more technical aspects, you can find the Python source that generates the following figures in the [Appendix: Figure Code](06.00-Figure-Code.ipynb). 要更具体的说明这些内容,我们来看一些非常简单的机器学习任务例子。这些例子为了给读者提供一个直观的,非定量的机器学习任务的概要介绍。在后续章节中,我们会深入介绍每一个模型以及它们是如何使用的。产生下面的图像的代码可以在[附录:产生图像的代码](06.00-Figure-Code.ipynb)中找到。 ### Classification: Predicting discrete labels ### 分类:预测离散的标签 > We will first take a look at a simple *classification* task, in which you are given a set of labeled points and want to use these to classify some unlabeled points. > Imagine that we have the data shown in this figure: 我们首先看一个简单的*分类*任务,你有一组标记过的点,然后你使用这些数据来标记新的未标记过的数据点。我们有下图展示的数据: ![](figures/05.01-classification-1.png) [附录中生成图像的代码](06.00-Figure-Code.ipynb#Classification-Example-Figure-1) > Here we have two-dimensional data: that is, we have two *features* for each point, represented by the *(x,y)* positions of the points on the plane. In addition, we have one of two *class labels* for each point, here represented by the colors of the points. From these features and labels, we would like to create a model that will let us decide whether a new point should be labeled "blue" or "red." 这里我们有二维的数据:即这里面的每个点我们都有两个*特征*,使用平面中的*(x,y)*位置表示。除此之外,我们对每个点都有一个标记,标记一共有两种,上图中使用了颜色进行区分。使用这些特征和标记,我们可以建立一个模型,然后我们就可以对一个新的数据点进行标记,判断它属于“蓝色”还是“红色”。 > There are a number of possible models for such a classification task, but here we will use an extremely simple one. We will make the assumption that the two groups can be separated by drawing a straight line through the plane between them, such that points on each side of the line fall in the same group. Here the *model* is a quantitative version of the statement "a straight line separates the classes", while the *model parameters* are the particular numbers describing the location and orientation of that line for our data. The optimal values for these model parameters are learned from the data (this is the "learning" in machine learning), which is often called *training the model*. 对于这个分类任务来说可以有很多可能的模型,但是我们会使用一个特别简单的模型。我们假设这两组数据点可以使用一条平面上的直线进行区分,直线两边分别属于两个不同的组。这里的*模型*是“一条分类直线”说法的定量版本,而*模型中的参数*就是用来描述直线位置和方向的特殊数字。优化后的模型参数值是从数据中学习得到的,这个学习过程我们通常成为*训练模型*。 > The following figure shows a visual representation of what the trained model looks like for this data: 下面展示了一个训练好的模型的可视化图像: ![](figures/05.01-classification-2.png) [附录中生成图像的代码](06.00-Figure-Code.ipynb#Classification-Example-Figure-2) > Now that this model has been trained, it can be generalized to new, unlabeled data. In other words, we can take a new set of data, draw this model line through it, and assign labels to the new points based on this model. This stage is usually called *prediction*. 
See the following figure: 当模型训练好之后,它就能泛化到新的未标记的数据上。换一种说法是,我们可以取一组新的数据,将模型的直线画上去穿过它们,然后给新的数据点定义标签。这个阶段通常被称为*预测*。参见下面的图: ![](figures/05.01-classification-3.png) [附录中生成图像的代码](06.00-Figure-Code.ipynb#Classification-Example-Figure-3) > This is the basic idea of a classification task in machine learning, where "classification" indicates that the data has discrete class labels. At first glance this may look fairly trivial: it would be relatively easy to simply look at this data and draw such a discriminatory line to accomplish this classification. A benefit of the machine learning approach, however, is that it can generalize to much larger datasets in many more dimensions. 上面就是机器学习中分类任务的基本概念,这里的*分类*表明数据具有离散的类别标签。第一眼看上去这个任务显得很琐碎:观察数据并画出这样一条分类的直线显得相对来说很容易。但是机器学习方法的优势在于,它可以泛化到非常大的数据集上,以及更多的维度上。 > For example, this is similar to the task of automated spam detection for email; in this case, we might use the following features and labels: > - *feature 1*, *feature 2*, etc. $\to$ normalized counts of important words or phrases ("Viagra", "Nigerian prince", etc.) > - *label* $\to$ "spam" or "not spam" 例如,类似自动垃圾电子邮件识别,在这种情况下,我们可能会用到下面的特征和标签: - *特征1*、*特征2*等 $\to$ 正则化后的重要单词或短语的计数(“伟哥”,“尼日利亚王子”等) - *标签* $\to$ “垃圾邮件”或“非垃圾邮件” > For the training set, these labels might be determined by individual inspection of a small representative sample of emails; for the remaining emails, the label would be determined using the model. For a suitably trained classification algorithm with enough well-constructed features (typically thousands or millions of words or phrases), this type of approach can be very effective. We will see an example of such text-based classification in [In Depth: Naive Bayes Classification](05.05-Naive-Bayes.ipynb). 对于这个训练集来说,这些标签可以通过检查一部分电子邮件的典型样本来获得,对于剩余的电子邮件,标签可以使用模型得到。对于一个良好训练的分类算法而言,它包括足够多的特征(上千或上百万的单词或短语),这样的方法会非常有效。我们会在[深入:朴素贝叶斯分类](05.05-Naive-Bayes.ipynb)一节中看到一个文本分类的例子。 > Some important classification algorithms that we will discuss in more detail are Gaussian naive Bayes (see [In Depth: Naive Bayes Classification](05.05-Naive-Bayes.ipynb)), support vector machines (see [In-Depth: Support Vector Machines](05.07-Support-Vector-Machines.ipynb)), and random forest classification (see [In-Depth: Decision Trees and Random Forests](05.08-Random-Forests.ipynb)). 我们后续会讨论到的一些重要的分类算法包括高斯朴素贝叶斯(参见[深入:朴素贝叶斯分类](05.05-Naive-Bayes.ipynb)),支持向量机(参见[深入:支持向量机](05.07-Support-Vector-Machines.ipynb))和随机森林分类(参见[深入:决策树和随机森林](05.08-Random-Forests.ipynb))。 ### Regression: Predicting continuous labels ### 回归:预测连续标签 > In contrast with the discrete labels of a classification algorithm, we will next look at a simple *regression* task in which the labels are continuous quantities. 对比离散标签分类算法,我们下面来看一个简单的*回归*任务,它的标签是一个连续的数量。 > Consider the data shown in the following figure, which consists of a set of points each with a continuous label: 考虑如下图展示的数据,包含着一组的数据点每一个都有一个连续的标签: ![](figures/05.01-regression-1.png) [附录中生成图像的代码](06.00-Figure-Code.ipynb#Regression-Example-Figure-1) > As with the classification example, we have two-dimensional data: that is, there are two features describing each data point. The color of each point represents the continuous label for that point. 就像分类例子中那样,我们有着二维的数据:即每个数据点都有两个特征。每个点的颜色代表这这个点的连续标签。 > There are a number of possible regression models we might use for this type of data, but here we will use a simple linear regression to predict the points. This simple linear regression model assumes that if we treat the label as a third spatial dimension, we can fit a plane to the data. 
This is a higher-level generalization of the well-known problem of fitting a line to data with two coordinates. 对于这个数据集来说,可以有很多种可能的回归模型,但是这里我们会使用一种简单的线性回归来预测数据点。这个简单的线性回归模型假设我们将数据标签作为第三个空间维度,我们可以在上面使用一个平面来拟合数据。这是在两个坐标中使用一根直线来拟合数据的泛化版本。 > We can visualize this setup as shown in the following figure: 可以使用下图可视化这个设置: ![](figures/05.01-regression-2.png) [附录中生成图像的代码](06.00-Figure-Code.ipynb#Regression-Example-Figure-2) > Notice that the *feature 1-feature 2* plane here is the same as in the two-dimensional plot from before; in this case, however, we have represented the labels by both color and three-dimensional axis position. From this view, it seems reasonable that fitting a plane through this three-dimensional data would allow us to predict the expected label for any set of input parameters. Returning to the two-dimensional projection, when we fit such a plane we get the result shown in the following figure: 注意上图中的*特征1 - 特征2*平面与前面二维图中数据点是一致的;我们使用了颜色以及三维坐标表示数据点的标签。从上图中我们可以看到,通过这个平面可以让我们对任意输入的数据点参数进行标签的预测。返回到二维投射,当我们拟合了这个平面我们会得到下图的结果: ![](figures/05.01-regression-3.png) [附录中生成图像的代码](06.00-Figure-Code.ipynb#Regression-Example-Figure-3) > This plane of fit gives us what we need to predict labels for new points. Visually, we find the results shown in the following figure: 拟合得到的平面能为我们提供预测新数据点标签的能力。下面的图像展示了预测的结果: ![](figures/05.01-regression-4.png) [附录中生成图像的代码](06.00-Figure-Code.ipynb#Regression-Example-Figure-4) > As with the classification example, this may seem rather trivial in a low number of dimensions. But the power of these methods is that they can be straightforwardly applied and evaluated in the case of data with many, many features. 同样的,这个方法在维度较少时显得很普通。但是当数据的特征很多时,这个方法的威力就显现出来了。 > For example, this is similar to the task of computing the distance to galaxies observed through a telescope—in this case, we might use the following features and labels: > - *feature 1*, *feature 2*, etc. $\to$ brightness of each galaxy at one of several wave lengths or colors > - *label* $\to$ distance or redshift of the galaxy 例如,类似通过望远镜计算星系之间距离任务时,我们会使用下面的特征和标签: - *特征1*、*特征2*等 $\to$ 每个星系在不同波长或颜色范围上的亮度值 - *标签* $\to$ 星系的距离或红移 > The distances for a small number of these galaxies might be determined through an independent set of (typically more expensive) observations. Distances to remaining galaxies could then be estimated using a suitable regression model, without the need to employ the more expensive observation across the entire set. In astronomy circles, this is known as the "photometric redshift" problem. 少量的星系距离可以通过独立的观测方式(通常更加昂贵)来获得。剩余的星系距离可以使用合适的回归模型进行估算,避免了在所有星系上使用昂贵观测方法的需要。在天文学领域,这被称为*光度红移*问题。 > Some important regression algorithms that we will discuss are linear regression (see [In Depth: Linear Regression](05.06-Linear-Regression.ipynb)), support vector machines (see [In-Depth: Support Vector Machines](05.07-Support-Vector-Machines.ipynb)), and random forest regression (see [In-Depth: Decision Trees and Random Forests](05.08-Random-Forests.ipynb)). 我们还会介绍其他一些重要的回归算法,包括线性回归(参见[深入:线性回归](05.06-Linear-Regression.ipynb)),支持向量机(参见[深入:支持向量机](05.07-Support-Vector-Machines.ipynb))和随机森林回归(参见[深入:决策树和随机森林](05.08-Random-Forests.ipynb))。 ### Clustering: Inferring labels on unlabeled data ### 聚类:在未标记的数据上推断标签 > The classification and regression illustrations we just looked at are examples of supervised learning algorithms, in which we are trying to build a model that will predict labels for new data. Unsupervised learning involves models that describe data without reference to any known labels. 
上面介绍的分类和回归为我们展示了使用有监督学习算法的例子,我们会从数据中学习得到一个模型然后使用它预测新数据的标签。无监督学习用来描述数据的模型是从没有任何已知标签的数据中获得的。 > One common case of unsupervised learning is "clustering," in which data is automatically assigned to some number of discrete groups. For example, we might have some two-dimensional data like that shown in the following figure: 最常见的无监督学习场景是“聚类”,其中的数据自动组合成一些离散的分组。例如下图中展示的二维数据: ![](figures/05.01-clustering-1.png) [附录中生成图像的代码](06.00-Figure-Code.ipynb#Clustering-Example-Figure-2) > By eye, it is clear that each of these points is part of a distinct group. Given this input, a clustering model will use the intrinsic structure of the data to determine which points are related. Using the very fast and intuitive *k*-means algorithm (see [In Depth: K-Means Clustering](05.11-K-Means.ipynb)), we find the clusters shown in the following figure: 肉眼观察可以知道很显然这些数据点是不同分组的组成部分。对于这个输入来说,一个聚类模型会使用输入数据的内在结构来找到哪些点是关联的。使用下面快速直观的*k均值*算法(参见[深入:k均值聚类](05.11-K-Means.ipynb)),我们会发现如下如的聚类: ![](figures/05.01-clustering-2.png) [附录中生成图像的代码](06.00-Figure-Code.ipynb#Clustering-Example-Figure-2) > *k*-means fits a model consisting of *k* cluster centers; the optimal centers are assumed to be those that minimize the distance of each point from its assigned center. Again, this might seem like a trivial exercise in two dimensions, but as our data becomes larger and more complex, such clustering algorithms can be employed to extract useful information from the dataset. *k均值*会适应训练出一个包括*k*个聚类中心点的模型;优化后的中心点应该是属于这个聚类群的所有点距离之和最小的点。还是需要说明的是在二维的情况下,这看起来有点平淡无奇,但是当我们数据变得更大更复杂时,这种聚类算法可以用来从数据集中提取出有用的信息。 > We will discuss the *k*-means algorithm in more depth in [In Depth: K-Means Clustering](05.11-K-Means.ipynb). Other important clustering algorithms include Gaussian mixture models (See [In Depth: Gaussian Mixture Models](05.12-Gaussian-Mixtures.ipynb)) and spectral clustering (See [Scikit-Learn's clustering documentation](http://scikit-learn.org/stable/modules/clustering.html)). 我们会在[深入:k均值聚类](05.11-K-Means.ipynb)一节中深入讨论k均值算法。其他重要的聚类算法包括高斯混合模型(参见[深入:高斯混合模型](05.12-Gaussian-Mixtures.ipynb))和谱聚类(参见[Scikit-Learn聚类在线文档](http://scikit-learn.org/stable/modules/clustering.html))。 ### Dimensionality reduction: Inferring structure of unlabeled data ### 降维:推断无标记数据的结构 > Dimensionality reduction is another example of an unsupervised algorithm, in which labels or other information are inferred from the structure of the dataset itself. Dimensionality reduction is a bit more abstract than the examples we looked at before, but generally it seeks to pull out some low-dimensional representation of data that in some way preserves relevant qualities of the full dataset. Different dimensionality reduction routines measure these relevant qualities in different ways, as we will see in [In-Depth: Manifold Learning](05.10-Manifold-Learning.ipynb). 降维是另一个无监督算法的例子,它能从数据集本身的结构推断标签或其他的信息。降维的例子比起前面那些算法的例子稍微复杂一些,总的来说,降维通过用更少维度的数据表达但是却保留了完整数据集的相关关键信息。不同的降维算法从不同方面衡量这些相关信息,就像我们会在[深入:流形学习](05.10-Manifold-Learning.ipynb)中看到的那样。 > As an example of this, consider the data shown in the following figure: 使用下图展示的数据作为例子: ![](figures/05.01-dimesionality-1.png) [附录中产生图像的代码](06.00-Figure-Code.ipynb#Dimensionality-Reduction-Example-Figure-1) > Visually, it is clear that there is some structure in this data: it is drawn from a one-dimensional line that is arranged in a spiral within this two-dimensional space. In a sense, you could say that this data is "intrinsically" only one dimensional, though this one-dimensional data is embedded in higher-dimensional space. 
A suitable dimensionality reduction model in this case would be sensitive to this nonlinear embedded structure, and be able to pull out this lower-dimensionality representation. 从图上很容易看出数据有一些内在的结构:数据是由一维的线卷曲成螺旋状的二维形状。或者直觉上你可以认为数据本质上是一维的,不过是嵌入在一个更高维度的空间中。一个合适的降维模型可以在这个情况下感知这种非线性的内嵌结构,并且能够将其低维度的数据表现方式提取出来。 > The following figure shows a visualization of the results of the Isomap algorithm, a manifold learning algorithm that does exactly this: 下面展示了使用Isomap算法的可视化结果,这是一种适合该应用场景的流形学习算法: ![](figures/05.01-dimesionality-2.png) [附录中生成图像的代码](06.00-Figure-Code.ipynb#Dimensionality-Reduction-Example-Figure-2) > Notice that the colors (which represent the extracted one-dimensional latent variable) change uniformly along the spiral, which indicates that the algorithm did in fact detect the structure we saw by eye. As with the previous examples, the power of dimensionality reduction algorithms becomes clearer in higher-dimensional cases. For example, we might wish to visualize important relationships within a dataset that has 100 or 1,000 features. Visualizing 1,000-dimensional data is a challenge, and one way we can make this more manageable is to use a dimensionality reduction technique to reduce the data to two or three dimensions. 注意到上图中的颜色(代表着提取出来的一维隐变量)是沿着螺旋线均匀变化的,这表明算法确实能够检测到我们肉眼观察到的结构。降维算法的威力同样可以在更高维度的数据中更好的展现出来。例如,我们希望将具有100或1000个特征的数据集的重要关联关系在图中可视化出来,可视化1000维度的数据是非常具有挑战性的,我们可以通过降维技术将数据维度减少到二维或三维,这就很容易实现可视化了。 > Some important dimensionality reduction algorithms that we will discuss are principal component analysis (see [In Depth: Principal Component Analysis](05.09-Principal-Component-Analysis.ipynb)) and various manifold learning algorithms, including Isomap and locally linear embedding (See [In-Depth: Manifold Learning](05.10-Manifold-Learning.ipynb)). 我们在本章中会介绍一些重要的降维算法,包括主成分分析(参见[深入:主成分分析](05.09-Principal-Component-Analysis.ipynb))和不同的流形学习算法,如Isomap和局部线性嵌入(参见[深入:流形学习](05.10-Manifold-Learning.ipynb))。 ## Summary ## 总结 > Here we have seen a few simple examples of some of the basic types of machine learning approaches. Needless to say, there are a number of important practical details that we have glossed over, but I hope this section was enough to give you a basic idea of what types of problems machine learning approaches can solve. 本节中我们看到了一些基本机器学习方法的简单例子。无需说明也看得出来,我们只是一笔带过的进行了相关介绍,但通过本节的内容希望能为读者提供了关于机器学习方法能够解决的问题类型的基本概念。 > In short, we saw the following: > - *Supervised learning*: Models that can predict labels based on labeled training data > - *Classification*: Models that predict labels as two or more discrete categories > - *Regression*: Models that predict continuous labels > - *Unsupervised learning*: Models that identify structure in unlabeled data > - *Clustering*: Models that detect and identify distinct groups in the data > - *Dimensionality reduction*: Models that detect and identify lower-dimensional structure in higher-dimensional data 简单来说,有如下的主要几个方面: - *有监督学习*:建立一个能够根据带标记的训练数据对数据进行标签预测的模型 - *分类*:建立一个能够预测两个或多个离散分组标签的模型 - *回归*:建立一个能够预测连续标签的模型 - *无监督学习*:建立一个能够识别未标记数据内在结构的模型 - *聚类*:建立一个检查和识别数据不同分组的模型 - *降维*:建立一个能发现高维度数据在低维度情况下结构的模型 > In the following sections we will go into much greater depth within these categories, and see some more interesting examples of where these concepts can be useful. 在后续章节中,我们会深入到上述的这些机器学习方法类型中,还有看到更多这些方法能发挥作用的有趣的例子。 > All of the figures in the preceding discussion are generated based on actual machine learning computations; the code behind them can be found in [Appendix: Figure Code](06.00-Figure-Code.ipynb). 
本节中所有的图像都是使用真实的机器学习计算生成的;产生图像的代码可以在[附录:生成图像的代码](06.00-Figure-Code.ipynb)中找到。
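The handbook keeps its figure-generating code in the appendix, so as a quick hands-on complement (not taken from the handbook itself), the following minimal scikit-learn sketch runs one example from each task category summarised above on small synthetic datasets. The dataset and variable names here are illustrative assumptions, not part of the original text.

```
# Minimal sketch: one estimator per task category, on synthetic data.
import numpy as np
from sklearn.datasets import make_blobs, make_regression
from sklearn.naive_bayes import GaussianNB          # classification
from sklearn.linear_model import LinearRegression   # regression
from sklearn.cluster import KMeans                  # clustering
from sklearn.decomposition import PCA               # dimensionality reduction

rng = np.random.RandomState(0)

# Supervised: classification (discrete labels)
X_cls, y_cls = make_blobs(n_samples=200, centers=2, random_state=0)
clf = GaussianNB().fit(X_cls, y_cls)
print("predicted classes:", clf.predict(X_cls[:5]))

# Supervised: regression (continuous labels)
X_reg, y_reg = make_regression(n_samples=200, n_features=2, noise=10, random_state=0)
reg = LinearRegression().fit(X_reg, y_reg)
print("predicted values:", reg.predict(X_reg[:5]))

# Unsupervised: clustering (no labels are used to fit the model)
X_clu, _ = make_blobs(n_samples=200, centers=3, random_state=0)
clusters = KMeans(n_clusters=3, n_init=10, random_state=0).fit_predict(X_clu)
print("cluster assignments:", clusters[:5])

# Unsupervised: dimensionality reduction (project to fewer dimensions)
X_high = rng.normal(size=(200, 10))
X_low = PCA(n_components=2).fit_transform(X_high)
print("reduced shape:", X_low.shape)
```

Later sections of the chapter go into each of these estimators in much more depth; this sketch only shows the common fit/predict (or fit/transform) pattern they share.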
# Random Forest

```
from sklearn.model_selection import RandomizedSearchCV, cross_val_score
from sklearn.ensemble import RandomForestRegressor
from scipy.stats import randint
from time import time
import pandas as pd
import os

if '__file__' in locals():
    current_folder = os.path.dirname(os.path.abspath(__file__))
else:
    current_folder = os.getcwd()

merge_features = '"{}"'.format(os.path.join(current_folder, '..', 'Features', 'Merge features.ipynb'))
calcular_auc = '"{}"'.format(os.path.join(current_folder, '..', 'Calcular AUC.ipynb'))
set_de_entrenamiento_testing_y_prediccion = '"{}"'.format(os.path.join(
    current_folder, '..', 'Set de entrenamiento, testing y predicción.ipynb'
))
hiperparametros_csv = os.path.join(current_folder, 'hiperparametros', 'random_forest.csv')

pd.options.mode.chained_assignment = None

%run $merge_features

assert(df_features.shape[0] == df['person'].unique().shape[0])
```

Load the training, testing and prediction sets.

```
%run $set_de_entrenamiento_testing_y_prediccion

labels_with_features = labels.merge(df_features, how='inner', on='person')
data = labels_with_features.drop('label', axis=1)
target = labels_with_features['label']

len(data.columns)
```

## Quick training

We obtain the metrics with cross validation.

```
param = {
    'bootstrap': True,
    'max_depth': 15,
    'max_features': 124,
    'min_samples_leaf': 74,
    'min_samples_split': 6,
    'n_estimators': 126
}
cv_splits = 10  # number of splits in the cross validation

regr = RandomForestRegressor(**param)

%%time
scores = cross_val_score(regr, data, target, cv=cv_splits, scoring='roc_auc')
print("Accuracy: %0.6f (+/- %0.6f)" % (scores.mean(), scores.std() * 2))

%%time
scores = cross_val_score(regr, data, target, cv=cv_splits, scoring='roc_auc')
print("Accuracy: %0.6f (+/- %0.6f)" % (scores.mean(), scores.std() * 2))
```

## Feature importance

```
regr.fit(data, target);

feature_importance = pd.DataFrame(data={
    'columna': data.columns,
    'importancia': regr.feature_importances_
}).set_index('columna')

hashing_features = ['feature_hashing_timestamp_days']
for hashing_feature in hashing_features:
    hashing_importance = feature_importance[feature_importance.index.str.startswith(hashing_feature)].sum()
    feature_importance = feature_importance[~feature_importance.index.str.startswith(hashing_feature)]
    feature_importance.loc[hashing_feature] = hashing_importance

feature_importance.sort_values('importancia', ascending=False)
```

# Hyperparameters

In this section we search for the random forest hyperparameters with a Random Search and cross validation. This Random Search was built on top of the scikit-learn example code: https://scikit-learn.org/stable/auto_examples/model_selection/plot_randomized_search.html#sphx-glr-auto-examples-model-selection-plot-randomized-search-py.

Hyperparameters to try.

```
param_dist = {
    'n_estimators': list(range(1, 150, 5)),
    'max_depth': list(range(5, 80, 5)),
    'max_features': randint(1, data.shape[1]),
    'min_samples_split': randint(2, 11),
    'min_samples_leaf': randint(2, 100),
    'bootstrap': [True, False]
}

cv_splits = 10      # number of splits in the cross validation
n_iter_search = 20  # number of sampled settings; in total splits * n_iter_search random forests are tried

regr = RandomForestRegressor()
```

Note: there is more information in the console from which Jupyter is run. *n_jobs* can be increased so that more processes run in parallel, but there is a risk of the run hanging due to lack of memory.
I recommend increasing *n_jobs* gradually, with a low *n_iter_search*, until you find the largest *n_jobs* your machine can handle.

```
random_search = RandomizedSearchCV(regr, param_distributions=param_dist,
                                   iid=False, refit=True, verbose=10,
                                   return_train_score=True,
                                   n_iter=n_iter_search, cv=cv_splits,
                                   scoring='roc_auc', n_jobs=2);

start = time()
random_search.fit(data, target)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
      " parameter settings." % ((time() - start), n_iter_search))
```

The **best** Random Forest was:

```
print('score: {}'.format(random_search.best_score_))
random_search.best_params_

print('score: {}'.format(random_search.best_score_))
random_search.best_params_
```

The search results can be imported into a Pandas DataFrame and analysed.

```
stats_training = pd.DataFrame(data=random_search.cv_results_)
stats_training.head(2)
```

Write the best result to a file.

```
hyperparameter_data = {
    'algorithm': 'random_forest',
    'hyperparameters': random_search.best_params_,
    'cv_splits': cv_splits,
    'auc': random_search.best_score_,
    'features': data.columns
}

%run -i write_hyperparameters.py
```
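Because `refit=True`, `random_search.best_estimator_` is already retrained on all of the data passed to `fit`. As a hedged sketch, and only under the assumption that a binary `target` is available (the hold-out split below is hypothetical and not part of this notebook), the tuned model could additionally be checked against rows that the search never saw. Note also that the `iid` argument has been removed in recent scikit-learn versions, so on a newer install it would simply have to be dropped from the call above.

```
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score

# Hypothetical hold-out check: split before searching, run the search on the
# training part only, then score the refit estimator on the held-out part.
X_tr, X_ho, y_tr, y_ho = train_test_split(data, target, test_size=0.2,
                                          stratify=target, random_state=42)

random_search.fit(X_tr, y_tr)            # search on the training part only
best_rf = random_search.best_estimator_  # already refit on X_tr (refit=True)

holdout_auc = roc_auc_score(y_ho, best_rf.predict(X_ho))
print('hold-out AUC: {:.6f}'.format(holdout_auc))
```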
``` import psycopg2 import config as creds import sqlalchemy from sqlalchemy import create_engine import numpy as np from numpy.random import randn import pandas as pd from scipy import stats from datetime import datetime ``` ELECTRICITY for years 2018-2019, INDIVIDUAL BUILDING TYPES Foreign keys: One Hot encode categorical features YEARBUILT and WARD, exclude DCREALPROPERTYID Numeric features: sqft, awnd, cldd, htdd, snow Target feature: kbtu VotingRegressor - "Retail" CONNECT TO DATABASE: ``` user=creds.PGUSER password=creds.PGPASSWORD host=creds.PGHOST port=5432 database=creds.PGDATABASE engine_str=f"postgresql+psycopg2://{user}:{password}@{host}:{port}/{database}" engine = create_engine(engine_str) conn = engine.raw_connection() print('Connected') cur = conn.cursor() print('Cursor created') ``` EXTRACT DATASET: ``` query=''' SELECT b.kbtu ,b.REPORTEDBUILDINGGROSSFLOORAREA ,b.dcrealpropertyid ,b.ward ,b.yearbuilt ,b.primarypropertytype_selfselect ,b.elegas ,n.awnd ,n.cldd ,n.htdd ,n.snow ,n.tavg ,n.wdf2 ,n.wdf5 ,n.wsf2 ,n.wsf5 ,n.date FROM buildings_data b LEFT OUTER join noaa_data n ON b.REPORTINGYEAR = n.WEATHERYEAR WHERE b.MONTH = n.MONTH AND b.ELEGAS = 'E' AND b.PRIMARYPROPERTYTYPE_SELFSELECT = '15' AND b.REPORTINGYEAR BETWEEN 2018 AND 2019 AND b.YEARBUILT > 0 AND b.REPORTEDBUILDINGGROSSFLOORAREA > 50000; ''' data=pd.read_sql(query,conn) data.head() data.isnull().values.any() ``` FORMAT COLUMNS: ``` #CONVERT 'Date' COLUMN TO datetime format #data["reportingyear"] = data["reportingyear"].astype(str) #data['month']=data['month'].apply(lambda x: '{0:0>2}'.format(x)) #data['date_time'] = data[['reportingyear', 'month']].agg('-'.join, axis=1) #data['date_time'] = (data.date_time + "-01") #data['date_time'] = datetime.strptime('date_time', "%Y-%m-%d") data['datetime']=pd.to_datetime(data['date']) data['primarypropertytype_selfselect'].dtype data['primarypropertytype_selfselect']=data['primarypropertytype_selfselect'].astype('int32') data.set_index('datetime', inplace=True) data.head() data.columns data.dtypes import matplotlib.pyplot as plt import seaborn as sns plt.figure(figsize=(25,15)) ftr = list(["kbtu", "reportedbuildinggrossfloorarea", "ward", "yearbuilt", "awnd", "cldd", "htdd", "snow"]) corrMatrix = data[ftr].corr() sns.heatmap(corrMatrix, annot=True, fmt='.1f', linewidths=.5) ``` DEFINE FEATURES: ``` TARGET = "kbtu" COLS = ['reportedbuildinggrossfloorarea', 'ward', 'yearbuilt', 'awnd', 'cldd', 'htdd', 'snow', 'datetime'] def make_sklearn_data(df=data, target=TARGET, cols=COLS): df = df.reset_index() X, y = df[cols], df[target] return X, y features = ['reportedbuildinggrossfloorarea', 'ward', 'yearbuilt', 'awnd', 'cldd', 'htdd', 'snow'] X, y = make_sklearn_data(cols=features) #Rank2D from yellowbrick.features import Rank2D # Instantiate the visualizer with the Pearson ranking algorithm visualizer = Rank2D(algorithm='pearson', features=features, size=(1080, 720)) visualizer.fit(X, y) visualizer.transform(X) visualizer.show() # Instantiate the visualizer with the Covariance algorithm visualizer = Rank2D(algorithm='covariance', features=features, size=(1080, 720)) visualizer.fit(X, y) visualizer.transform(X) visualizer.show() #Feature Importances import yellowbrick as yb from sklearn.ensemble import RandomForestRegressor from yellowbrick.features import RadViz from yellowbrick.features import FeatureImportances model = RandomForestRegressor(n_estimators=10) viz = FeatureImportances(model, labels=features, size=(1080, 720)) viz.fit(X, y) viz.show() #Feature Importances import 
yellowbrick as yb from sklearn.linear_model import Lasso from yellowbrick.features import RadViz from yellowbrick.features import FeatureImportances model = Lasso() viz = FeatureImportances(model, labels=features, size=(1080, 720)) viz.fit(X, y) viz.show() #CYCLIC ENCODER: to capture temporal cycles (yearly). from sklearn.base import BaseEstimator, TransformerMixin class CyclicEncoder(BaseEstimator, TransformerMixin): def __init__(self, date_extract="month"): if date_extract not in {"minute", "hour", "week", "month", "year"}: raise ValueError(f"specify correct date component to extract, not {date_extract}") self.date_extract = date_extract def get_date_component(self, x): if self.date_extract == "month": return x.dt.month elif self.date_extract == "year": return x.dt.year else: raise NotImplementedError(f"{self.date_extract} date component not implemented yet") def fit(self, X, y=None): self.cycle_max_ = self.get_date_component(X).max() return self def transform(self, X, y=None): cols = [] names = [] x = self.get_date_component(X) xn = 2 * np.pi * x / self.cycle_max_ cols.append(np.cos(xn)) names.append(f"{X.name}_cos") cols.append(np.sin(xn)) names.append(f"{X.name}_sin") return pd.DataFrame(np.asarray(cols).T, columns=names) ce = CyclicEncoder().fit_transform(data.reset_index()["datetime"]) ce.plot(x="datetime_cos", y="datetime_sin", kind="scatter") #FEATURE EXTRACTION from sklearn.base import clone from sklearn.compose import ColumnTransformer from sklearn.pipeline import FeatureUnion, Pipeline from sklearn.preprocessing import OneHotEncoder extraction = Pipeline([ ('column_selection', ColumnTransformer([ ('time_components', FeatureUnion([ ('month', CyclicEncoder(date_extract='month')), ('year', CyclicEncoder(date_extract='year')), ]), 'datetime'), ('ward_one_hot', OneHotEncoder(handle_unknown='ignore'), ['ward']), ('yearbuilt_one_hot', OneHotEncoder(handle_unknown='ignore'), ['yearbuilt']), ], remainder="passthrough")), ]) def make_energy_pipeline(model, append_transformers=None, fe=extraction): pipe = clone(fe) if append_transformers: for step in append_transformers: pipe.steps.append(step) pipe.steps.append(["model", clone(model)]) return pipe #Test the Feature Extraction Pipeline from sklearn.linear_model import LinearRegression from sklearn.model_selection import train_test_split as tts X_train, X_test, y_train, y_test = tts(*make_sklearn_data(), test_size=0.2) model = make_energy_pipeline(LinearRegression()) model.fit(X_train, y_train) model.score(X_test, y_test) #TIME SERIES CROSS VALIDATION from functools import partial from sklearn.metrics import make_scorer from sklearn.model_selection import cross_val_score from sklearn.model_selection import TimeSeriesSplit from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error rmse = partial(mean_squared_error, squared=False) def time_series_evaluate(model, X, y): """ Performs time series cross validation on the model, returning the cross validated r2, mse, and mae of the regressor, along with the final fitted model, fitted on all of the data. 
""" cv = TimeSeriesSplit(12) scores = {} scores["r2"] = cross_val_score(model, X, y, cv=cv, scoring=make_scorer(r2_score)) scores["mse"] = cross_val_score(model, X, y, cv=cv, scoring=make_scorer(mean_squared_error)) # scores["rmse"] = cross_val_score(model, X, y, cv=cv, scoring=make_scorer(rmse)) scores["mae"] = cross_val_score(model, X, y, cv=cv, scoring=make_scorer(mean_absolute_error)) model.fit(X, y) return model, scores # LINEAR MODEL X, y = make_sklearn_data() lm = make_energy_pipeline(LinearRegression()) time_series_evaluate(lm, X, y) #Second order polynomial regression from sklearn.linear_model import SGDRegressor from sklearn.preprocessing import PolynomialFeatures qm = make_energy_pipeline(SGDRegressor(), [('quad', PolynomialFeatures(2))]) time_series_evaluate(qm, X, y) from sklearn.ensemble import RandomForestRegressor rfm = make_energy_pipeline(RandomForestRegressor(n_estimators=10, max_depth=3)) time_series_evaluate(rfm, X, y) import time from sklearn.linear_model import LinearRegression from sklearn.ensemble import RandomForestRegressor from sklearn.ensemble import VotingRegressor start = time.time() r1 = LinearRegression() r2 = RandomForestRegressor(n_estimators=10, random_state=1) X, y = make_sklearn_data() er = make_energy_pipeline(VotingRegressor([('lr', r1), ('rf', r2)])) print(time_series_evaluate(er, X, y)) print("Time = {:0.3f} seconds".format(time.time()-start)) conn.close() print('Closed') ```
github_jupyter
import psycopg2
import config as creds
import sqlalchemy
from sqlalchemy import create_engine
import numpy as np
from numpy.random import randn
import pandas as pd
from scipy import stats
from datetime import datetime

user = creds.PGUSER
password = creds.PGPASSWORD
host = creds.PGHOST
port = 5432
database = creds.PGDATABASE
engine_str = f"postgresql+psycopg2://{user}:{password}@{host}:{port}/{database}"
engine = create_engine(engine_str)
conn = engine.raw_connection()
print('Connected')
cur = conn.cursor()
print('Cursor created')

query = '''
SELECT b.kbtu
      ,b.REPORTEDBUILDINGGROSSFLOORAREA
      ,b.dcrealpropertyid
      ,b.ward
      ,b.yearbuilt
      ,b.primarypropertytype_selfselect
      ,b.elegas
      ,n.awnd
      ,n.cldd
      ,n.htdd
      ,n.snow
      ,n.tavg
      ,n.wdf2
      ,n.wdf5
      ,n.wsf2
      ,n.wsf5
      ,n.date
FROM buildings_data b
LEFT OUTER JOIN noaa_data n ON b.REPORTINGYEAR = n.WEATHERYEAR
WHERE b.MONTH = n.MONTH
  AND b.ELEGAS = 'E'
  AND b.PRIMARYPROPERTYTYPE_SELFSELECT = '15'
  AND b.REPORTINGYEAR BETWEEN 2018 AND 2019
  AND b.YEARBUILT > 0
  AND b.REPORTEDBUILDINGGROSSFLOORAREA > 50000;
'''
data = pd.read_sql(query, conn)
data.head()

data.isnull().values.any()

# CONVERT 'Date' COLUMN TO datetime format
# data["reportingyear"] = data["reportingyear"].astype(str)
# data['month'] = data['month'].apply(lambda x: '{0:0>2}'.format(x))
# data['date_time'] = data[['reportingyear', 'month']].agg('-'.join, axis=1)
# data['date_time'] = (data.date_time + "-01")
# data['date_time'] = datetime.strptime('date_time', "%Y-%m-%d")
data['datetime'] = pd.to_datetime(data['date'])

data['primarypropertytype_selfselect'].dtype
data['primarypropertytype_selfselect'] = data['primarypropertytype_selfselect'].astype('int32')
data.set_index('datetime', inplace=True)
data.head()
data.columns
data.dtypes

import matplotlib.pyplot as plt
import seaborn as sns

plt.figure(figsize=(25, 15))
ftr = list(["kbtu", "reportedbuildinggrossfloorarea", "ward", "yearbuilt", "awnd", "cldd", "htdd", "snow"])
corrMatrix = data[ftr].corr()
sns.heatmap(corrMatrix, annot=True, fmt='.1f', linewidths=.5)

TARGET = "kbtu"
COLS = ['reportedbuildinggrossfloorarea', 'ward', 'yearbuilt', 'awnd', 'cldd', 'htdd', 'snow', 'datetime']

def make_sklearn_data(df=data, target=TARGET, cols=COLS):
    df = df.reset_index()
    X, y = df[cols], df[target]
    return X, y

features = ['reportedbuildinggrossfloorarea', 'ward', 'yearbuilt', 'awnd', 'cldd', 'htdd', 'snow']
X, y = make_sklearn_data(cols=features)

# Rank2D
from yellowbrick.features import Rank2D

# Instantiate the visualizer with the Pearson ranking algorithm
visualizer = Rank2D(algorithm='pearson', features=features, size=(1080, 720))
visualizer.fit(X, y)
visualizer.transform(X)
visualizer.show()

# Instantiate the visualizer with the Covariance algorithm
visualizer = Rank2D(algorithm='covariance', features=features, size=(1080, 720))
visualizer.fit(X, y)
visualizer.transform(X)
visualizer.show()

# Feature Importances
import yellowbrick as yb
from sklearn.ensemble import RandomForestRegressor
from yellowbrick.features import RadViz
from yellowbrick.features import FeatureImportances

model = RandomForestRegressor(n_estimators=10)
viz = FeatureImportances(model, labels=features, size=(1080, 720))
viz.fit(X, y)
viz.show()

# Feature Importances
import yellowbrick as yb
from sklearn.linear_model import Lasso
from yellowbrick.features import RadViz
from yellowbrick.features import FeatureImportances

model = Lasso()
viz = FeatureImportances(model, labels=features, size=(1080, 720))
viz.fit(X, y)
viz.show()

# CYCLIC ENCODER: to capture temporal cycles (yearly).
from sklearn.base import BaseEstimator, TransformerMixin

class CyclicEncoder(BaseEstimator, TransformerMixin):

    def __init__(self, date_extract="month"):
        if date_extract not in {"minute", "hour", "week", "month", "year"}:
            raise ValueError(f"specify correct date component to extract, not {date_extract}")
        self.date_extract = date_extract

    def get_date_component(self, x):
        if self.date_extract == "month":
            return x.dt.month
        elif self.date_extract == "year":
            return x.dt.year
        else:
            raise NotImplementedError(f"{self.date_extract} date component not implemented yet")

    def fit(self, X, y=None):
        self.cycle_max_ = self.get_date_component(X).max()
        return self

    def transform(self, X, y=None):
        cols = []
        names = []
        x = self.get_date_component(X)
        xn = 2 * np.pi * x / self.cycle_max_
        cols.append(np.cos(xn))
        names.append(f"{X.name}_cos")
        cols.append(np.sin(xn))
        names.append(f"{X.name}_sin")
        return pd.DataFrame(np.asarray(cols).T, columns=names)

ce = CyclicEncoder().fit_transform(data.reset_index()["datetime"])
ce.plot(x="datetime_cos", y="datetime_sin", kind="scatter")

# FEATURE EXTRACTION
from sklearn.base import clone
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import FeatureUnion, Pipeline
from sklearn.preprocessing import OneHotEncoder

extraction = Pipeline([
    ('column_selection', ColumnTransformer([
        ('time_components', FeatureUnion([
            ('month', CyclicEncoder(date_extract='month')),
            ('year', CyclicEncoder(date_extract='year')),
        ]), 'datetime'),
        ('ward_one_hot', OneHotEncoder(handle_unknown='ignore'), ['ward']),
        ('yearbuilt_one_hot', OneHotEncoder(handle_unknown='ignore'), ['yearbuilt']),
    ], remainder="passthrough")),
])

def make_energy_pipeline(model, append_transformers=None, fe=extraction):
    pipe = clone(fe)
    if append_transformers:
        for step in append_transformers:
            pipe.steps.append(step)
    pipe.steps.append(["model", clone(model)])
    return pipe

# Test the Feature Extraction Pipeline
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split as tts

X_train, X_test, y_train, y_test = tts(*make_sklearn_data(), test_size=0.2)
model = make_energy_pipeline(LinearRegression())
model.fit(X_train, y_train)
model.score(X_test, y_test)

# TIME SERIES CROSS VALIDATION
from functools import partial
from sklearn.metrics import make_scorer
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import TimeSeriesSplit
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error

rmse = partial(mean_squared_error, squared=False)

def time_series_evaluate(model, X, y):
    """
    Performs time series cross validation on the model, returning the cross
    validated r2, mse, and mae of the regressor, along with the final fitted
    model, fitted on all of the data.
    """
    cv = TimeSeriesSplit(12)
    scores = {}
    scores["r2"] = cross_val_score(model, X, y, cv=cv, scoring=make_scorer(r2_score))
    scores["mse"] = cross_val_score(model, X, y, cv=cv, scoring=make_scorer(mean_squared_error))
    # scores["rmse"] = cross_val_score(model, X, y, cv=cv, scoring=make_scorer(rmse))
    scores["mae"] = cross_val_score(model, X, y, cv=cv, scoring=make_scorer(mean_absolute_error))
    model.fit(X, y)
    return model, scores

# LINEAR MODEL
X, y = make_sklearn_data()
lm = make_energy_pipeline(LinearRegression())
time_series_evaluate(lm, X, y)

# Second order polynomial regression
from sklearn.linear_model import SGDRegressor
from sklearn.preprocessing import PolynomialFeatures

qm = make_energy_pipeline(SGDRegressor(), [('quad', PolynomialFeatures(2))])
time_series_evaluate(qm, X, y)

from sklearn.ensemble import RandomForestRegressor
rfm = make_energy_pipeline(RandomForestRegressor(n_estimators=10, max_depth=3))
time_series_evaluate(rfm, X, y)

import time
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import VotingRegressor

start = time.time()
r1 = LinearRegression()
r2 = RandomForestRegressor(n_estimators=10, random_state=1)
X, y = make_sklearn_data()
er = make_energy_pipeline(VotingRegressor([('lr', r1), ('rf', r2)]))
print(time_series_evaluate(er, X, y))
print("Time = {:0.3f} seconds".format(time.time() - start))

conn.close()
print('Closed')
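To make the intuition behind the cyclic encoder above concrete, here is a minimal standalone sketch (plain numpy, independent of the pipeline and database code; the month range 1-12 is just an illustrative assumption). It shows that the sin/cos mapping places December next to January, which a raw month number does not.

```
import numpy as np

# Map month numbers 1..12 onto the unit circle, mirroring what CyclicEncoder does.
months = np.arange(1, 13)
angle = 2 * np.pi * months / 12
points = np.column_stack([np.cos(angle), np.sin(angle)])

# December (12) ends up adjacent to January (1), while June sits on the opposite
# side of the circle; a raw month number would put December "far" from January.
dec, jan, jun = points[11], points[0], points[5]
print(np.linalg.norm(dec - jan))  # ~0.52 (neighbours on the circle)
print(np.linalg.norm(dec - jun))  # ~2.0  (opposite side of the circle)
```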
## 4. Fundamentals of Machine Learning

### Four Branches of Machine Learning

Machine Learning algorithms generally fall into four broad categories.

A **Supervised Learning** algorithm maps input data to known responses / targets. Applied examples of supervised learning include optical character recognition, speech recognition, image classification and language translation. Beyond classification & regression, there are other sub-branches:
- **Sequence Generation**: Given a picture, predict a caption describing it. Sequence generation can sometimes be reformulated as a series of classification problems
- **Syntax Tree Prediction**: Given a sentence, predict its decomposition into a syntax tree
- **Object Detection**: Given a picture, draw a bounding box around objects in the picture.
- **Image Segmentation**: Given a picture, draw a pixel-level mask on a specific object

An **Unsupervised Learning** algorithm finds interesting transformations of the input data without the help of any targets. Some uses of unsupervised learning are data visualisation, data compression and data denoising. Unsupervised learning is the bread & butter of data analytics and is usually necessary to understand a dataset before attempting to solve a supervised-learning problem. Common sub-branches of unsupervised learning are dimensionality reduction and clustering.

**Self-supervised Learning** is supervised learning without human-annotated labels. Labels are involved, but they are generated from the input data, typically using a heuristic algorithm. Autoencoders are well-known self-supervised learning techniques.

In **Reinforcement Learning**, an agent receives information about its environment and learns to choose actions that will maximise some reward. For example, a neural network "looks" at a video game screen and outputs game actions in order to maximise its score. Currently it is mostly research and hasn't had significant practical successes beyond games; some of the best-known reinforcement learning results come from DeepMind.

### Model Evaluation

In machine learning, the goal is to achieve models that **generalise**, i.e. that perform well on never-before-seen data. When a model performs well on the training set but not on the validation set, we say that it is **overfitting**.

To measure generalisation properly, we split the data into <u>training sets</u>, <u>validation sets</u> and <u>test sets</u>. We train the model on the training set, evaluate it on the validation set and do one final check on the test set.

Developing a model always involves tuning its hyperparameters, e.g. the number of layers. Tuning hyperparameters is a form of learning too. In a train-validation 2-way split, what usually happens is that we gradually overfit the validation set: information about the validation data leaks into the model through the tuning process. The model then scores artificially well on the validation set, which probably means it will not generalise as well to truly unseen data; this is why we keep a separate, untouched test set.

There are 2 common ways to split the data:
- train-test split method (hold-out validation). Train on training data, and evaluate performance on test data.
- k-fold cross validation method. For every iteration, use one of k partitions to test, while using the rest of the dataset to train.

Beyond this, we can repeat the k-fold cross validation P times, shuffling the dataset before each repetition (iterated k-fold validation with shuffling).

<b>Things to be aware of when doing the splits</b>
- Training set and test set should be representative of the overall data, so it is usually good to stratify the sampling on the targets. If the original dataset has 2 labels split 40%-60%, then the training set and the test set should both have this split.
- If we want to perform time series analysis then we should not shuffle before training.
- Ensure that, as far as possible, the train and test sets are disjoint after the split: no data point should exist in both the training and the test set.

### Data Preprocessing, Feature Engineering, Feature Learning

After selecting the model and knowing how to tune it, we now turn to the data that is fed into the model. Data needs to be preprocessed so the model can consume it. We also want to transform / impute data to ensure the predictions are sound.

#### <u>PREPROCESSING</u>

<b>Vectorisation</b> - Most of the time, before feeding data to neural networks, we need to ensure it is in tensor form. This process is data vectorisation. It is usually easy with numerical features, but some transformations need to be done for text and image data.

<b>Normalisation</b> - In most datasets, different features have different ranges, some larger than others. So it is generally safer to normalise the data before feeding it to the network. Here normalisation means each feature has a mean of 0 and a variance of 1.

<b>Imputation / Handling Missing Values</b> - Sometimes a feature might have some values missing, so it is good to handle them by performing imputation, for example imputing 0 for the missing values. If you expect the test data to have missing values for a particular feature, it is good to have that property in the training data too, so the network will learn to ignore that value.

#### <u>FEATURE ENGINEERING</u>

Feature engineering is the process of using domain knowledge to apply transformations of the data that make the learning easier (in this sense, find patterns more easily). High quality features allow you to solve ML problems more elegantly with fewer resources, and they let you solve a problem with much less data. Deep learning models can usually learn good features on their own when a lot of data is available, so when fewer data points are available, high-quality features are critical.
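As an illustration of the evaluation and preprocessing ideas above, here is a minimal scikit-learn sketch; the iris dataset and logistic regression are arbitrary choices made purely for demonstration. It shows a stratified hold-out split, normalisation statistics fitted on the training data only, and k-fold cross validation wrapped in a pipeline so no information leaks from the validation folds.

```
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold, cross_val_score, train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

X, y = load_iris(return_X_y=True)

# Hold-out validation: stratify so class proportions match in train and test sets.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, stratify=y, random_state=0)

# Fit normalisation statistics on the training set only, to avoid information leaks.
scaler = StandardScaler().fit(X_train)
model = LogisticRegression(max_iter=1000).fit(scaler.transform(X_train), y_train)
print("hold-out accuracy:", model.score(scaler.transform(X_test), y_test))

# k-fold cross validation: every sample is used for validation exactly once.
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=0)
pipeline = make_pipeline(StandardScaler(), LogisticRegression(max_iter=1000))
scores = cross_val_score(pipeline, X, y, cv=cv)
print("5-fold accuracy: %.3f +/- %.3f" % (scores.mean(), scores.std()))
```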
[Link to this document's Jupyter Notebook](./0324-MPI_Programming_in-class-assignment.ipynb)

In order to successfully complete this assignment you need to participate both individually and in groups during class. If you attend class in-person then have one of the instructors check your notebook and sign you out before leaving class on **Wednesday March 24**. If you are attending asynchronously, turn in your assignment using D2L no later than **_11:59pm on Wednesday March 24_**.

---
# In-Class Assignment: MPI Programming Basics

<img src="https://farm6.staticflickr.com/5182/5766506970_64806a7180_b.jpg" width="90%" alt="Picture of two children whispering to each other. Included as a motivation for the Rumor Mill example" >

### Agenda for today's class (70 minutes)

1. (20 minutes) [Pre class Review](#Pre-class-Review)
2. (30 minutes) [Pi Estimation](#Pi-Estimation)
3. (20 minutes) [Rumor Mill continued](#Rumor-Mill-continued)

---
<a name=Pre-class-Review></a>
# 1. Pre class Review

- [0323--MPI_Syntax_pre-class-assignment](0323--MPI_Syntax_pre-class-assignment.ipynb)

---
<a name=Pi-Estimation></a>
# 2. Pi Estimation

Let's go back and consider the pi estimation algorithm we used in [0217-OMP_Threads_in-class-assignment](0210-OMP_Threads_in-class-assignment.ipynb):

```c++
#include <omp.h>
static long num_steps = 100000;
double step;
#define NUM_THREADS 2
void main ()
{
    int i, nthreads;
    double pi, sum[NUM_THREADS];
    step = 1.0/(double) num_steps;
    omp_set_num_threads(NUM_THREADS);
    #pragma omp parallel
    {
        int i, id, nthrds;
        double x;
        id = omp_get_thread_num();
        nthrds = omp_get_num_threads();
        if (id == 0) nthreads = nthrds;
        for (i=id, sum[id]=0.0; i<num_steps; i=i+nthrds) {
            x = (i+0.5)*step;
            sum[id] += 4.0/(1.0+x*x);
        }
    }
    for(i=0, pi=0.0; i<nthreads; i++) pi += sum[i] * step;
}
```

&#9989; **<font color=red>DO THIS:</font>** As a class, let's discuss the cons of moving this example away from a shared-memory system to MPI. Despite this being a poor example for running in MPI, let's spend today converting the code to an MPI version just to get an idea of how MPI would work.

&#9989; **<font color=red>DO THIS:</font>** Take the above example and turn it into an MPI-only program (one possible sketch of the same decomposition, written in Python with mpi4py, is included after this notebook for reference).

---
<a name=Rumor-Mill-continued></a>
# 3. Rumor Mill continued

For the last few minutes of class we will revisit the rumor mill example and see if we can start making modifications to run the example inside MPI.

-----
### Congratulations, we're done!

If you attend class in-person then have one of the instructors check your notebook and sign you out before leaving class. If you are attending asynchronously, turn in your assignment using D2L.

### Course Resources:
- [Website](https://msu-cmse-courses.github.io/cmse802-f20-student/)
- [ZOOM](https://msu.zoom.us/j/98207034052)
- [JargonJar](https://docs.google.com/document/d/1ahg48CCFhRzUL-QIHzlt_KEf1XqsCasFBU4iePHhcug/edit#)
- [GIT](https://gitlab.msu.edu/colbrydi/cmse401-s21.git)

Written by Dr. Dirk Colbry, Michigan State University
<a rel="license" href="http://creativecommons.org/licenses/by-nc/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by-nc/4.0/88x31.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-nc/4.0/">Creative Commons Attribution-NonCommercial 4.0 International License</a>.

----
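As referenced in the Pi Estimation section above, here is one possible sketch of the same round-robin decomposition in MPI. It uses Python's mpi4py rather than C, purely as an assumption to keep this document's examples in one language, and it is meant as a reference sketch rather than the assignment solution: each rank takes every `size`-th rectangle, and the partial sums are combined with a reduction, mirroring the per-thread `sum[id]` array and the final serial loop in the OpenMP version.

```
# Run with, e.g.:  mpiexec -n 4 python pi_mpi.py
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()   # plays the role of omp_get_thread_num()
size = comm.Get_size()   # plays the role of omp_get_num_threads()

num_steps = 100000
step = 1.0 / num_steps

# Each rank handles steps rank, rank+size, rank+2*size, ... (the same round-robin
# assignment as the OpenMP loop "for (i=id; i<num_steps; i=i+nthrds)").
local_sum = 0.0
for i in range(rank, num_steps, size):
    x = (i + 0.5) * step
    local_sum += 4.0 / (1.0 + x * x)

# Combine the partial sums on rank 0 (replaces the serial reduction over sum[]).
pi = comm.reduce(local_sum * step, op=MPI.SUM, root=0)
if rank == 0:
    print("pi ~=", pi)
```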
``` import importlib import numpy as np import matplotlib import matplotlib.pyplot as plt import matplotlib.font_manager import random import time import collections import os import sys import pandas as pd import json matplotlib.rcParams['text.usetex'] = True plt.rc('font', family='serif') from matplotlib.pyplot import cm from at_aloha import AT from aloha_q import ALOHA_Q from eb_aloha import EB_ALOHA from aloha_qtf import QTF import experiments as exp ``` ### Run protocols and save the result as a json file ``` def run_ramp(protocol, n_runs=20, delayed=False, detect_energy=True, **kwargs): runs = [] for run_idx in range(n_runs): r = exp.ramp(protocol, seed=run_idx, delayAck=delayed, detect_energy=detect_energy, **kwargs) runs.append(r) print(".", end="") return runs def run_churn(protocol, n_runs=20, num_players=20, delayed=False, detect_energy=True, **kwargs): t0 = time.time() runs = [] for run_idx in range(n_runs): r = exp.churn(protocol, num_players=num_players, seed=run_idx, delayAck=delayed, detect_energy=detect_energy, **kwargs) runs.append(r) print(".", end="") return runs def at(**kwargs): return AT(**kwargs) def eb(**kwargs): return EB_ALOHA(**kwargs) def q(**kwargs): return ALOHA_Q(**kwargs) def qtf(**kwargs): return QTF(**kwargs) # runs_at = run_ramp(at, n_runs=20) # runs_eb = run_ramp(eb, n_runs=20) # runs_q = run_ramp(q, n_runs=20) runs_qtf = run_ramp(qtf, n_runs=20) exp.save_runs(runs_at, "ramp_at.json") exp.save_runs(runs_qtf, "ramp_qtf.json") exp.save_runs(runs_eb, "ramp_eb.json") exp.save_runs(runs_q, "ramp_q.json") ``` ### Read result from json and plot result ``` runs_at = exp.read_runs("ramp_at.json") runs_qtf = exp.read_runs("ramp_qtf.json") runs_eb = exp.read_runs("ramp_eb.json") runs_q = exp.read_runs("ramp_q.json") def plot_utilizations_w_err(utils, names=None, vertsize=2., title=None, loc=None, xlabel="Time blocks (1 block = 100 time slots)", fn=None, ramp=True, colors=['blue', 'red', 'green', 'brown', 'grey'], ): matplotlib.rcParams['figure.figsize'] = (5.0, vertsize) extra = { 'e1': (4, 2, 1, 2, 1, 2) } ls = ['-', '--', ':', '-.', 'e1'] fig, ax = plt.subplots() for i, u in enumerate(utils): y = np.vstack([r.total_utilization for r in u]) mean = np.average(y, axis=0) std = np.std(y, axis=0, ddof=1) er_pos = np.minimum(mean + std, 1.) er_neg = np.maximum(mean - std, 0.) 
x = np.arange(len(mean)) if ls[i] in extra: ax.plot(mean, color=colors[i], dashes=extra[ls[i]], label=names[i] if names else None) else: ax.plot(mean, color=colors[i], ls=ls[i], label=names[i] if names else None) ax.fill_between(x, er_pos, er_neg, facecolor=colors[i], alpha=0.2) plt.ylim((-0.05, 1.05)) plt.yticks(np.arange(0., 1.25, 0.25)) plt.ylabel("Network utilization") if ramp: x_position = np.array([t for i, t in enumerate(x) if i%50 ==0]) plt.xlim(-1, 301) else: x_position = np.array([t for i, t in enumerate(x) if i%50 ==0]) plt.xlim(-1, 201) plt.xticks(x_position, x_position) if title: plt.title(title) if names: plt.legend(loc="lower right") if xlabel: plt.xlabel(xlabel) ax.grid() if loc: plt.legend(loc=loc) if fn: plt.savefig(fn, bbox_inches='tight') plt.show() def plot_fairness(is_jain, run_sets, names=None, title=None, vertsize=2., ylabel=None, xlabel="Time blocks (1 block = 100 time slots)", fn=None, ramp=True, loc=None, colors=['blue', 'red', 'green', 'brown', 'grey'], ): matplotlib.rcParams['figure.figsize'] = (5.0, vertsize) fig, ax = plt.subplots() extra = { 'e1': (4, 2, 1, 2, 1, 2) } ls = ['-', '--', ':', '-.', "e1"] for i, runs in enumerate(run_sets): y = np.vstack([(r.jain if is_jain else r.bottom_fair_ratio) for r in runs]) mean = np.average(y, axis=0) std = np.std(y, axis=0, ddof=1) er_pos = np.minimum(mean + std, 1.) er_neg = np.maximum(mean - std, 0.) x = np.arange(len(mean)) if ls[i] in extra: ax.plot(mean, color=colors[i], dashes=extra[ls[i]], label=names[i] if names else None) else: ax.plot(mean, color=colors[i], ls=ls[i], label=names[i] if names else None) ax.fill_between(x, er_pos, er_neg, facecolor=colors[i], alpha=0.2) plt.ylim((-0.0, 1.05)) plt.yticks(np.arange(0., 1.25, 0.25)) if ramp: x_position = np.array([t for i, t in enumerate(x) if i%5 ==0]) plt.xlim(-0.1, 30) else: x_position = np.array([t for i, t in enumerate(x) if i%5 ==0]) plt.xlim(-0.1, 20.1) plt.xticks(x_position, x_position*10) if names: plt.legend() if title: plt.title(title) if xlabel: plt.xlabel(xlabel) if ylabel: plt.ylabel(ylabel) ax.grid() if loc: plt.legend(loc=loc) if fn: plt.savefig(fn, bbox_inches='tight') plt.show() plot_utilizations_w_err([runs_at, runs_qtf, runs_eb, runs_q], vertsize=3.75, names=["AT-ALOHA", "ALOHA-QTF", "EB-ALOHA", "ALOHA-Q"]) plot_fairness(True, [runs_at, runs_qtf, runs_eb, runs_q], vertsize=2.5, names=["AT-ALOHA", "ALOHA-QTF", "EB-ALOHA", "ALOHA-Q"]) ``` #### Plot success, collision and empty rate of one protocol ``` def plot_protocol(runs, title, vertsize=2, xlabel=None, fn=None): matplotlib.rcParams['figure.figsize'] = (5.0, vertsize) colors = {"success": 'black', "collision": 'red', "empty": 'green'} ls = {"success": '-', "collision": '--', "empty": '-.'} result = {"success": np.array([i.total_utilization for i in runs]), "collision": np.array([i.collisions for i in runs]), "empty": np.array([i.empty for i in runs]), } fig, ax = plt.subplots(dpi=80) for name, y in result.items(): mean = np.average(y, axis=0) std = np.std(y, axis=0, ddof=1) er_pos = np.minimum(mean + std, 1.) er_neg = np.maximum(mean - std, 0.) 
x = np.arange(len(mean)) ax.plot(mean, color=colors[name], ls=ls[name], label=name) ax.fill_between(x, er_pos, er_neg, facecolor=colors[name], alpha=0.2) plt.ylim((-0.05, 1.05)) plt.xlim(-1, len(mean)) plt.yticks(np.arange(0., 1.25, 0.25)) plt.ylabel("Network utilization") plt.title(title) plt.legend(loc="best") ax.grid() if xlabel: plt.xlabel(xlabel) if fn: plt.savefig(fn.format(node), bbox_inches='tight') plt.show() plot_protocol(runs_at, "AT-ALOHA") plot_protocol(runs_qtf, "ALOHA-QTF") plot_protocol(runs_eb, "EB-ALOHA") plot_protocol(runs_q, "ALOHA-Q") ```
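The fairness curves plotted above rely on Jain's fairness index (the `r.jain` field). The `experiments` module that computes it is not shown here, so the following standalone sketch is only illustrative of the standard definition: for per-node throughputs x_1..x_n the index is (sum x)^2 / (n * sum x^2), which equals 1 when every node gets an equal share and approaches 1/n when a single node monopolises the channel.

```
import numpy as np

def jain_index(throughputs):
    """Jain's fairness index: 1.0 = perfectly fair, 1/n = one node takes everything."""
    x = np.asarray(throughputs, dtype=float)
    return x.sum() ** 2 / (len(x) * (x ** 2).sum())

print(jain_index([0.25, 0.25, 0.25, 0.25]))  # 1.0
print(jain_index([1.0, 0.0, 0.0, 0.0]))      # 0.25
```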
## SQL Alchemy

SQLAlchemy is the Python SQL toolkit and Object Relational Mapper that gives application developers the full power and flexibility of SQL.<br>
It supports multiple databases like SQLite, PostgreSQL, MySQL, Oracle, MS-SQL, Firebird and Sybase.<br>
(https://www.sqlalchemy.org/)<br>
(https://www.sqlalchemy.org/features.html)<br><br>
In this tutorial page we will use SQLAlchemy as an interface for SQL queries through the OpenDP library. First we check the output of SQL access using the smartnoise PostgresReader. <br>
The example for data access is copied from (https://github.com/opendp/smartnoise-samples/tree/master/data)

```
## use this code for uploading of data to database
'''
from sqlalchemy import create_engine

pums = pd.read_csv('readers/PUMS.csv')

#alchemyEngine = create_engine('postgresql+psycopg2://postgres:[email protected]/pums', pool_recycle=3600);
alchemyEngine = create_engine('mysql+pymysql://admin:password@gaganopendbmysql.clagiea8p62e.eu-west-1.rds.amazonaws.com/pums')
SQLConnection = alchemyEngine.connect()
SQLTable = "pums"
pums.to_sql(SQLTable, SQLConnection, if_exists='fail', schema='pumst')
'''

from opendp.smartnoise.metadata import CollectionMetadata
from opendp.smartnoise.sql import PrivateReader, PostgresReader

meta = CollectionMetadata.from_file('readers/PUMS_row.yaml')

query = 'SELECT married, AVG(income) AS income, COUNT(*) AS n FROM pums.pumst GROUP BY married'

reader = PostgresReader('database-1.clagiea8p62e.eu-west-1.rds.amazonaws.com', 'pums', 'postgres', password='password')
private_reader = PrivateReader(reader, meta, 1.0)

noisy = private_reader.execute(query)
print(noisy)
```

Next we will use SQLAlchemy as a reader for OpenDP SQL access

```
from opendp.smartnoise_t.sql import PrivateReader
from opendp.smartnoise_t.metadata import CollectionMetadata
from opendp.smartnoise_t.sql.reader.sqlalchemy import SQLAlchemyReader

meta = CollectionMetadata.from_file('readers/PUMS_row.yaml')

reader = SQLAlchemyReader('postgresql+psycopg2://postgres:[email protected]/pums')
private_reader = PrivateReader(reader, meta, 1.0)

noisy = private_reader.execute(query)
print(noisy)
```

Next we will use SQLAlchemy for OpenDP data access from a MySQL database

```
meta = CollectionMetadata.from_file('readers/PUMS_row_mysql.yaml')

query = 'SELECT married, AVG(income) AS income, COUNT(*) AS n FROM pumst.pums GROUP BY married'

reader = SQLAlchemyReader(url='mysql+pymysql://admin:password@gaganopendbmysql.clagiea8p62e.eu-west-1.rds.amazonaws.com/pums')
private_reader = PrivateReader(reader, meta, 1.0)

noisy = private_reader.execute(query)
print(noisy)
```
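Since `PrivateReader` takes a privacy budget (the `1.0` passed above is epsilon), a quick way to see its effect is to rerun the same query at a few epsilon values and watch the added noise shrink as the budget grows. The cell below is only a sketch: it reuses the `reader`, `meta` and `query` objects already defined above, so it is tied to the same assumed connection details.

```
# Reuses `reader`, `meta` and `query` from the cells above.
for epsilon in (0.1, 1.0, 10.0):
    private_reader = PrivateReader(reader, meta, epsilon)
    result = private_reader.execute(query)
    # Larger epsilon -> less noise -> results closer to the true aggregates.
    print(f"epsilon={epsilon}: {result}")
```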
# **Recipe Info GUI screen** ### Displays selected recommended recipe information with Title, URL, ingredients list, instructions and nutrient information. ### URL can be clicked directly to access the recipe source page ***Created by Rahul Maheshwari*** ``` #! /usr/bin/env python # -*- coding: utf-8 -*- import sys import webbrowser from tkinter import END import pandas as pd import pickle ``` **Populate GUI** ``` try: import Tkinter as tk except ImportError: import tkinter as tk try: import ttk py3 = False except ImportError: import tkinter.ttk as ttk py3 = True import recipe_info_support def vp_start_gui(): """Starting point when module is the main routine.""" global val, w, root root = tk.Tk() top = Toplevel1(root) recipe_info_support.init(root, top) root.mainloop() w = None def create_Toplevel1(rt, *args, **kwargs): """Starting point when module is imported by another module. Correct form of call: 'create_Toplevel1(root, *args, **kwargs)' .""" global w, w_win, root # rt = root root = rt w = tk.Toplevel(root) top = Toplevel1(w) recipe_info_support.init(w, top, *args, **kwargs) return (w, top) def destroy_Toplevel1(): global w w.destroy() w = None ``` ## **Class containing methods to display the user selected recipe with URL, ingredients list, instructions and nutrient information in nice formatting.** ``` class Toplevel1: def callback(self, url): webbrowser.open_new(url) def __init__(self, top=None): """This class configures and populates the toplevel window. top is the toplevel containing window.""" _bgcolor = '#d9d9d9' # X11 color: 'gray85' _fgcolor = '#000000' # X11 color: 'black' _compcolor = '#d9d9d9' # X11 color: 'gray85' _ana1color = '#d9d9d9' # X11 color: 'gray85' _ana2color = '#ececec' # Closest X11 color: 'gray92' font10 = "-family {Century Gothic} -size 24 -weight bold" font11 = "-family {Century Gothic} -size 14" font12 = "-family {Century Gothic} -size 16 -weight bold" font13 = "-family {Century Gothic} -size 14 -weight bold " \ "-underline 1" font14 = "-family {Century Gothic} -size 16 -weight bold" font15 = "-family {Segoe UI} -size 12" font9 = "-family {Segoe UI} -size 14 -weight bold" self.style = ttk.Style() if sys.platform == "win32": self.style.theme_use('winnative') self.style.configure('.', background=_bgcolor) self.style.configure('.', foreground=_fgcolor) self.style.map('.', background= [('selected', _compcolor), ('active', _ana2color)]) top.geometry("1920x1001+650+150") top.attributes("-fullscreen", True) top.minsize(148, 1) top.maxsize(1924, 1055) top.resizable(1, 1) top.title("New Toplevel") top.configure(background="#17b71c") self.Button1 = tk.Button(top) self.Button1.place(relx=0.927, rely=0.05, height=62, width=68) self.Button1.configure(activebackground="#ececec") self.Button1.configure(activeforeground="#000000") self.Button1.configure(background="#d70428") self.Button1.configure(disabledforeground="#a3a3a3") self.Button1.configure(font=font9) self.Button1.configure(foreground="#ffffff") self.Button1.configure(highlightbackground="#d9d9d9") self.Button1.configure(highlightcolor="#000000") self.Button1.configure(pady="0") self.Button1.configure(text='''X''') self.Button1.configure(command=root.destroy) self.recipe_label = tk.Label(top) self.recipe_label.place(relx=0.23, rely=0.04, height=76, width=900) self.recipe_label.configure(background="#17b71c") self.recipe_label.configure(disabledforeground="#a3a3a3") self.recipe_label.configure(font=font10) self.recipe_label.configure(foreground="#ffffff") self.recipe_label.configure(justify='center') 
self.url_label = tk.Label(top) self.url_label.place(relx=0.14, rely=0.14, height=56, width=104) self.url_label.configure(activebackground="#f9f9f9") self.url_label.configure(activeforeground="black") self.url_label.configure(background="#17b71c") self.url_label.configure(disabledforeground="#a3a3a3") self.url_label.configure(font=font12) self.url_label.configure(foreground="#ffffff") self.url_label.configure(highlightbackground="#d9d9d9") self.url_label.configure(highlightcolor="black") self.url_label.configure(text='''URL''') self.url = tk.Label(top) self.url.place(relx=0.23, rely=0.14, height=56, width=900) self.url.configure(activebackground="#f9f9f9") self.url.configure(activeforeground="#0000ff") self.url.configure(background="#17b71c") self.url.configure(cursor="hand2") self.url.configure(disabledforeground="#a3a3a3") self.url.configure(font=font13) self.url.configure(foreground="#0000ff") self.url.configure(highlightbackground="#d9d9d9") self.url.configure(highlightcolor="black") self.url.configure(justify='center') self.url.configure(text='''https://www.google.com''') self.ingredients_label = tk.Label(top) self.ingredients_label.place(relx=0.115, rely=0.28, height=56, width=184) self.ingredients_label.configure(activebackground="#f9f9f9") self.ingredients_label.configure(activeforeground="black") self.ingredients_label.configure(background="#17b71c") self.ingredients_label.configure(disabledforeground="#a3a3a3") self.ingredients_label.configure(font=font14) self.ingredients_label.configure(foreground="#ffffff") self.ingredients_label.configure(highlightbackground="#d9d9d9") self.ingredients_label.configure(highlightcolor="black") self.ingredients_label.configure(text='''Ingredients''') self.instructions = tk.Label(top) self.instructions.place(relx=0.47, rely=0.28, height=56, width=184) self.instructions.configure(activebackground="#f9f9f9") self.instructions.configure(activeforeground="black") self.instructions.configure(background="#17b71c") self.instructions.configure(disabledforeground="#a3a3a3") self.instructions.configure(font="-family {Century Gothic} -size 16 -weight bold") self.instructions.configure(foreground="#ffffff") self.instructions.configure(highlightbackground="#d9d9d9") self.instructions.configure(highlightcolor="black") self.instructions.configure(text='''Instructions''') self.nutrient_label = tk.Label(top) self.nutrient_label.place(relx=0.7, rely=0.28, height=58, width=294) self.nutrient_label.configure(activebackground="#f9f9f9") self.nutrient_label.configure(activeforeground="black") self.nutrient_label.configure(background="#17b71c") self.nutrient_label.configure(disabledforeground="#a3a3a3") self.nutrient_label.configure(font="-family {Century Gothic} -size 16 -weight bold") self.nutrient_label.configure(foreground="#ffffff") self.nutrient_label.configure(highlightbackground="#d9d9d9") self.nutrient_label.configure(highlightcolor="black") self.nutrient_label.configure(text='''Nutrient Information''') self.Scrolledtext1 = ScrolledText(top) self.Scrolledtext1.place(relx=0.021, rely=0.34, relheight=0.527 , relwidth=0.302) self.Scrolledtext1.configure(background="white") self.Scrolledtext1.configure(font=font12) self.Scrolledtext1.configure(foreground="black") self.Scrolledtext1.configure(highlightbackground="#d9d9d9") self.Scrolledtext1.configure(highlightcolor="black") self.Scrolledtext1.configure(insertbackground="black") self.Scrolledtext1.configure(insertborderwidth="3") self.Scrolledtext1.configure(selectbackground="#c4c4c4") 
self.Scrolledtext1.configure(selectforeground="black") self.Scrolledtext1.configure(wrap="none") self.Scrolledtext1_6 = ScrolledText(top) self.Scrolledtext1_6.place(relx=0.344, rely=0.34, relheight=0.527 , relwidth=0.365) self.Scrolledtext1_6.configure(background="white") self.Scrolledtext1_6.configure(font=font11) self.Scrolledtext1_6.configure(foreground="black") self.Scrolledtext1_6.configure(highlightbackground="#d9d9d9") self.Scrolledtext1_6.configure(highlightcolor="black") self.Scrolledtext1_6.configure(insertbackground="black") self.Scrolledtext1_6.configure(insertborderwidth="3") self.Scrolledtext1_6.configure(selectbackground="#c4c4c4") self.Scrolledtext1_6.configure(selectforeground="black") self.Scrolledtext1_6.configure(wrap="none") self.Scrolledtext1_7 = ScrolledText(top) self.Scrolledtext1_7.place(relx=0.729, rely=0.34, relheight=0.527 , relwidth=0.24) self.Scrolledtext1_7.configure(background="white") self.Scrolledtext1_7.configure(font=font11) self.Scrolledtext1_7.configure(foreground="black") self.Scrolledtext1_7.configure(highlightbackground="#d9d9d9") self.Scrolledtext1_7.configure(highlightcolor="black") self.Scrolledtext1_7.configure(insertbackground="black") self.Scrolledtext1_7.configure(insertborderwidth="3") self.Scrolledtext1_7.configure(selectbackground="#c4c4c4") self.Scrolledtext1_7.configure(selectforeground="black") self.Scrolledtext1_7.configure(wrap="none") df = pd.read_csv("recipes.csv") dbfile = open('selected_recipe_name', 'rb') selected_recipe_name = pickle.load(dbfile) recipe = pd.DataFrame(df.loc[df['Title'] == selected_recipe_name]) for idx, r in recipe.iterrows(): self.recipe_label.configure(text=str(r['Title'])) self.url.configure(text=str(r['URL'])) self.url.bind("<Button-1>", lambda e: self.callback(str(r['URL']))) ingredients_list = r['Ingredients'].strip("][").split('\', ') for idx, i in enumerate(ingredients_list): ingredient = i.strip('\'').lstrip() self.Scrolledtext1.insert(END, "\n" + str(idx + 1) + ". " + str(ingredient) + "." + "\n") instructions_list = r['Cooking instructions'].strip("][").split('\', ') for idx, i in enumerate(instructions_list): instruction = i.strip('\'').lstrip() self.Scrolledtext1_6.insert(END, "\n" + str(idx + 1) + ". " + str(instruction) + "." + "\n") self.Scrolledtext1_7.insert(END, r['Serves'] + str(" Serves") + "\n\n") self.Scrolledtext1_7.insert(END, str(r['Calories']) + str(" Calories") + "\n") self.Scrolledtext1_7.insert(END, str(r['Fat']) + str("g Fat") + "\n") self.Scrolledtext1_7.insert(END, str(round(r['Cholesterol'], 2)) + str("g Cholesterol") + "\n") self.Scrolledtext1_7.insert(END, str(r['Carbs']) + str("g Carbs") + "\n") self.Scrolledtext1_7.insert(END, str(r['Fiber']) + str("g Fiber") + "\n") self.Scrolledtext1_7.insert(END, str(r['Protein']) + str("g Protein") + "\n") # The following code is added to facilitate the Scrolled widgets you specified. class AutoScroll(object): """Configure the scrollbars for a widget.""" def __init__(self, master): # Rozen. Added the try-except clauses so that this class # could be used for scrolled entry widget for which vertical # scrolling is not supported. 5/7/14. 
try: vsb = ttk.Scrollbar(master, orient='vertical', command=self.yview) except: pass hsb = ttk.Scrollbar(master, orient='horizontal', command=self.xview) try: self.configure(yscrollcommand=self._autoscroll(vsb)) except: pass self.configure(xscrollcommand=self._autoscroll(hsb)) self.grid(column=0, row=0, sticky='nsew') try: vsb.grid(column=1, row=0, sticky='ns') except: pass hsb.grid(column=0, row=1, sticky='ew') master.grid_columnconfigure(0, weight=1) master.grid_rowconfigure(0, weight=1) # Copy geometry methods of master (taken from ScrolledText.py) if py3: methods = tk.Pack.__dict__.keys() | tk.Grid.__dict__.keys() \ | tk.Place.__dict__.keys() else: methods = tk.Pack.__dict__.keys() + tk.Grid.__dict__.keys() \ + tk.Place.__dict__.keys() for meth in methods: if meth[0] != '_' and meth not in ('config', 'configure'): setattr(self, meth, getattr(master, meth)) @staticmethod def _autoscroll(sbar): '''Hide and show scrollbar as needed.''' def wrapped(first, last): first, last = float(first), float(last) if first <= 0 and last >= 1: sbar.grid_remove() else: sbar.grid() sbar.set(first, last) return wrapped def __str__(self): return str(self.master) def _create_container(func): '''Creates a ttk Frame with a given master, and use this new frame to place the scrollbars and the widget.''' def wrapped(cls, master, **kw): container = ttk.Frame(master) container.bind('<Enter>', lambda e: _bound_to_mousewheel(e, container)) container.bind('<Leave>', lambda e: _unbound_to_mousewheel(e, container)) return func(cls, container, **kw) return wrapped class ScrolledText(AutoScroll, tk.Text): '''A standard Tkinter Text widget with scrollbars that will automatically show/hide as needed.''' @_create_container def __init__(self, master, **kw): tk.Text.__init__(self, master, **kw) AutoScroll.__init__(self, master) import platform def _bound_to_mousewheel(event, widget): child = widget.winfo_children()[0] if platform.system() == 'Windows' or platform.system() == 'Darwin': child.bind_all('<MouseWheel>', lambda e: _on_mousewheel(e, child)) child.bind_all('<Shift-MouseWheel>', lambda e: _on_shiftmouse(e, child)) else: child.bind_all('<Button-4>', lambda e: _on_mousewheel(e, child)) child.bind_all('<Button-5>', lambda e: _on_mousewheel(e, child)) child.bind_all('<Shift-Button-4>', lambda e: _on_shiftmouse(e, child)) child.bind_all('<Shift-Button-5>', lambda e: _on_shiftmouse(e, child)) def _unbound_to_mousewheel(event, widget): if platform.system() == 'Windows' or platform.system() == 'Darwin': widget.unbind_all('<MouseWheel>') widget.unbind_all('<Shift-MouseWheel>') else: widget.unbind_all('<Button-4>') widget.unbind_all('<Button-5>') widget.unbind_all('<Shift-Button-4>') widget.unbind_all('<Shift-Button-5>') def _on_mousewheel(event, widget): if platform.system() == 'Windows': widget.yview_scroll(-1 * int(event.delta / 120), 'units') elif platform.system() == 'Darwin': widget.yview_scroll(-1 * int(event.delta), 'units') else: if event.num == 4: widget.yview_scroll(-1, 'units') elif event.num == 5: widget.yview_scroll(1, 'units') def _on_shiftmouse(event, widget): if platform.system() == 'Windows': widget.xview_scroll(-1 * int(event.delta / 120), 'units') elif platform.system() == 'Darwin': widget.xview_scroll(-1 * int(event.delta), 'units') else: if event.num == 4: widget.xview_scroll(-1, 'units') elif event.num == 5: widget.xview_scroll(1, 'units') ``` **Main method to invoke GUI screen** ``` if __name__ == '__main__': vp_start_gui() ```
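The screen above reads the user's selection back with `pickle.load` from a file named `selected_recipe_name`. For context, a minimal sketch of what the selecting screen would have to write beforehand might look like the following; the recipe title used here is a made-up placeholder, not a value from `recipes.csv`.

```
import pickle

# Hypothetical producer side: the recommendation screen dumps the chosen title
# under the same filename that this Recipe Info screen loads back.
selected_title = "Example Recipe Title"  # placeholder, not taken from the dataset
with open('selected_recipe_name', 'wb') as fh:
    pickle.dump(selected_title, fh)
```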
github_jupyter
#! /usr/bin/env python # -*- coding: utf-8 -*- import sys import webbrowser from tkinter import END import pandas as pd import pickle try: import Tkinter as tk except ImportError: import tkinter as tk try: import ttk py3 = False except ImportError: import tkinter.ttk as ttk py3 = True import recipe_info_support def vp_start_gui(): """Starting point when module is the main routine.""" global val, w, root root = tk.Tk() top = Toplevel1(root) recipe_info_support.init(root, top) root.mainloop() w = None def create_Toplevel1(rt, *args, **kwargs): """Starting point when module is imported by another module. Correct form of call: 'create_Toplevel1(root, *args, **kwargs)' .""" global w, w_win, root # rt = root root = rt w = tk.Toplevel(root) top = Toplevel1(w) recipe_info_support.init(w, top, *args, **kwargs) return (w, top) def destroy_Toplevel1(): global w w.destroy() w = None class Toplevel1: def callback(self, url): webbrowser.open_new(url) def __init__(self, top=None): """This class configures and populates the toplevel window. top is the toplevel containing window.""" _bgcolor = '#d9d9d9' # X11 color: 'gray85' _fgcolor = '#000000' # X11 color: 'black' _compcolor = '#d9d9d9' # X11 color: 'gray85' _ana1color = '#d9d9d9' # X11 color: 'gray85' _ana2color = '#ececec' # Closest X11 color: 'gray92' font10 = "-family {Century Gothic} -size 24 -weight bold" font11 = "-family {Century Gothic} -size 14" font12 = "-family {Century Gothic} -size 16 -weight bold" font13 = "-family {Century Gothic} -size 14 -weight bold " \ "-underline 1" font14 = "-family {Century Gothic} -size 16 -weight bold" font15 = "-family {Segoe UI} -size 12" font9 = "-family {Segoe UI} -size 14 -weight bold" self.style = ttk.Style() if sys.platform == "win32": self.style.theme_use('winnative') self.style.configure('.', background=_bgcolor) self.style.configure('.', foreground=_fgcolor) self.style.map('.', background= [('selected', _compcolor), ('active', _ana2color)]) top.geometry("1920x1001+650+150") top.attributes("-fullscreen", True) top.minsize(148, 1) top.maxsize(1924, 1055) top.resizable(1, 1) top.title("New Toplevel") top.configure(background="#17b71c") self.Button1 = tk.Button(top) self.Button1.place(relx=0.927, rely=0.05, height=62, width=68) self.Button1.configure(activebackground="#ececec") self.Button1.configure(activeforeground="#000000") self.Button1.configure(background="#d70428") self.Button1.configure(disabledforeground="#a3a3a3") self.Button1.configure(font=font9) self.Button1.configure(foreground="#ffffff") self.Button1.configure(highlightbackground="#d9d9d9") self.Button1.configure(highlightcolor="#000000") self.Button1.configure(pady="0") self.Button1.configure(text='''X''') self.Button1.configure(command=root.destroy) self.recipe_label = tk.Label(top) self.recipe_label.place(relx=0.23, rely=0.04, height=76, width=900) self.recipe_label.configure(background="#17b71c") self.recipe_label.configure(disabledforeground="#a3a3a3") self.recipe_label.configure(font=font10) self.recipe_label.configure(foreground="#ffffff") self.recipe_label.configure(justify='center') self.url_label = tk.Label(top) self.url_label.place(relx=0.14, rely=0.14, height=56, width=104) self.url_label.configure(activebackground="#f9f9f9") self.url_label.configure(activeforeground="black") self.url_label.configure(background="#17b71c") self.url_label.configure(disabledforeground="#a3a3a3") self.url_label.configure(font=font12) self.url_label.configure(foreground="#ffffff") self.url_label.configure(highlightbackground="#d9d9d9") 
self.url_label.configure(highlightcolor="black") self.url_label.configure(text='''URL''') self.url = tk.Label(top) self.url.place(relx=0.23, rely=0.14, height=56, width=900) self.url.configure(activebackground="#f9f9f9") self.url.configure(activeforeground="#0000ff") self.url.configure(background="#17b71c") self.url.configure(cursor="hand2") self.url.configure(disabledforeground="#a3a3a3") self.url.configure(font=font13) self.url.configure(foreground="#0000ff") self.url.configure(highlightbackground="#d9d9d9") self.url.configure(highlightcolor="black") self.url.configure(justify='center') self.url.configure(text='''https://www.google.com''') self.ingredients_label = tk.Label(top) self.ingredients_label.place(relx=0.115, rely=0.28, height=56, width=184) self.ingredients_label.configure(activebackground="#f9f9f9") self.ingredients_label.configure(activeforeground="black") self.ingredients_label.configure(background="#17b71c") self.ingredients_label.configure(disabledforeground="#a3a3a3") self.ingredients_label.configure(font=font14) self.ingredients_label.configure(foreground="#ffffff") self.ingredients_label.configure(highlightbackground="#d9d9d9") self.ingredients_label.configure(highlightcolor="black") self.ingredients_label.configure(text='''Ingredients''') self.instructions = tk.Label(top) self.instructions.place(relx=0.47, rely=0.28, height=56, width=184) self.instructions.configure(activebackground="#f9f9f9") self.instructions.configure(activeforeground="black") self.instructions.configure(background="#17b71c") self.instructions.configure(disabledforeground="#a3a3a3") self.instructions.configure(font="-family {Century Gothic} -size 16 -weight bold") self.instructions.configure(foreground="#ffffff") self.instructions.configure(highlightbackground="#d9d9d9") self.instructions.configure(highlightcolor="black") self.instructions.configure(text='''Instructions''') self.nutrient_label = tk.Label(top) self.nutrient_label.place(relx=0.7, rely=0.28, height=58, width=294) self.nutrient_label.configure(activebackground="#f9f9f9") self.nutrient_label.configure(activeforeground="black") self.nutrient_label.configure(background="#17b71c") self.nutrient_label.configure(disabledforeground="#a3a3a3") self.nutrient_label.configure(font="-family {Century Gothic} -size 16 -weight bold") self.nutrient_label.configure(foreground="#ffffff") self.nutrient_label.configure(highlightbackground="#d9d9d9") self.nutrient_label.configure(highlightcolor="black") self.nutrient_label.configure(text='''Nutrient Information''') self.Scrolledtext1 = ScrolledText(top) self.Scrolledtext1.place(relx=0.021, rely=0.34, relheight=0.527 , relwidth=0.302) self.Scrolledtext1.configure(background="white") self.Scrolledtext1.configure(font=font12) self.Scrolledtext1.configure(foreground="black") self.Scrolledtext1.configure(highlightbackground="#d9d9d9") self.Scrolledtext1.configure(highlightcolor="black") self.Scrolledtext1.configure(insertbackground="black") self.Scrolledtext1.configure(insertborderwidth="3") self.Scrolledtext1.configure(selectbackground="#c4c4c4") self.Scrolledtext1.configure(selectforeground="black") self.Scrolledtext1.configure(wrap="none") self.Scrolledtext1_6 = ScrolledText(top) self.Scrolledtext1_6.place(relx=0.344, rely=0.34, relheight=0.527 , relwidth=0.365) self.Scrolledtext1_6.configure(background="white") self.Scrolledtext1_6.configure(font=font11) self.Scrolledtext1_6.configure(foreground="black") self.Scrolledtext1_6.configure(highlightbackground="#d9d9d9") 
self.Scrolledtext1_6.configure(highlightcolor="black") self.Scrolledtext1_6.configure(insertbackground="black") self.Scrolledtext1_6.configure(insertborderwidth="3") self.Scrolledtext1_6.configure(selectbackground="#c4c4c4") self.Scrolledtext1_6.configure(selectforeground="black") self.Scrolledtext1_6.configure(wrap="none") self.Scrolledtext1_7 = ScrolledText(top) self.Scrolledtext1_7.place(relx=0.729, rely=0.34, relheight=0.527 , relwidth=0.24) self.Scrolledtext1_7.configure(background="white") self.Scrolledtext1_7.configure(font=font11) self.Scrolledtext1_7.configure(foreground="black") self.Scrolledtext1_7.configure(highlightbackground="#d9d9d9") self.Scrolledtext1_7.configure(highlightcolor="black") self.Scrolledtext1_7.configure(insertbackground="black") self.Scrolledtext1_7.configure(insertborderwidth="3") self.Scrolledtext1_7.configure(selectbackground="#c4c4c4") self.Scrolledtext1_7.configure(selectforeground="black") self.Scrolledtext1_7.configure(wrap="none") df = pd.read_csv("recipes.csv") dbfile = open('selected_recipe_name', 'rb') selected_recipe_name = pickle.load(dbfile) recipe = pd.DataFrame(df.loc[df['Title'] == selected_recipe_name]) for idx, r in recipe.iterrows(): self.recipe_label.configure(text=str(r['Title'])) self.url.configure(text=str(r['URL'])) self.url.bind("<Button-1>", lambda e: self.callback(str(r['URL']))) ingredients_list = r['Ingredients'].strip("][").split('\', ') for idx, i in enumerate(ingredients_list): ingredient = i.strip('\'').lstrip() self.Scrolledtext1.insert(END, "\n" + str(idx + 1) + ". " + str(ingredient) + "." + "\n") instructions_list = r['Cooking instructions'].strip("][").split('\', ') for idx, i in enumerate(instructions_list): instruction = i.strip('\'').lstrip() self.Scrolledtext1_6.insert(END, "\n" + str(idx + 1) + ". " + str(instruction) + "." + "\n") self.Scrolledtext1_7.insert(END, r['Serves'] + str(" Serves") + "\n\n") self.Scrolledtext1_7.insert(END, str(r['Calories']) + str(" Calories") + "\n") self.Scrolledtext1_7.insert(END, str(r['Fat']) + str("g Fat") + "\n") self.Scrolledtext1_7.insert(END, str(round(r['Cholesterol'], 2)) + str("g Cholesterol") + "\n") self.Scrolledtext1_7.insert(END, str(r['Carbs']) + str("g Carbs") + "\n") self.Scrolledtext1_7.insert(END, str(r['Fiber']) + str("g Fiber") + "\n") self.Scrolledtext1_7.insert(END, str(r['Protein']) + str("g Protein") + "\n") # The following code is added to facilitate the Scrolled widgets you specified. class AutoScroll(object): """Configure the scrollbars for a widget.""" def __init__(self, master): # Rozen. Added the try-except clauses so that this class # could be used for scrolled entry widget for which vertical # scrolling is not supported. 5/7/14. 
try: vsb = ttk.Scrollbar(master, orient='vertical', command=self.yview) except: pass hsb = ttk.Scrollbar(master, orient='horizontal', command=self.xview) try: self.configure(yscrollcommand=self._autoscroll(vsb)) except: pass self.configure(xscrollcommand=self._autoscroll(hsb)) self.grid(column=0, row=0, sticky='nsew') try: vsb.grid(column=1, row=0, sticky='ns') except: pass hsb.grid(column=0, row=1, sticky='ew') master.grid_columnconfigure(0, weight=1) master.grid_rowconfigure(0, weight=1) # Copy geometry methods of master (taken from ScrolledText.py) if py3: methods = tk.Pack.__dict__.keys() | tk.Grid.__dict__.keys() \ | tk.Place.__dict__.keys() else: methods = tk.Pack.__dict__.keys() + tk.Grid.__dict__.keys() \ + tk.Place.__dict__.keys() for meth in methods: if meth[0] != '_' and meth not in ('config', 'configure'): setattr(self, meth, getattr(master, meth)) @staticmethod def _autoscroll(sbar): '''Hide and show scrollbar as needed.''' def wrapped(first, last): first, last = float(first), float(last) if first <= 0 and last >= 1: sbar.grid_remove() else: sbar.grid() sbar.set(first, last) return wrapped def __str__(self): return str(self.master) def _create_container(func): '''Creates a ttk Frame with a given master, and use this new frame to place the scrollbars and the widget.''' def wrapped(cls, master, **kw): container = ttk.Frame(master) container.bind('<Enter>', lambda e: _bound_to_mousewheel(e, container)) container.bind('<Leave>', lambda e: _unbound_to_mousewheel(e, container)) return func(cls, container, **kw) return wrapped class ScrolledText(AutoScroll, tk.Text): '''A standard Tkinter Text widget with scrollbars that will automatically show/hide as needed.''' @_create_container def __init__(self, master, **kw): tk.Text.__init__(self, master, **kw) AutoScroll.__init__(self, master) import platform def _bound_to_mousewheel(event, widget): child = widget.winfo_children()[0] if platform.system() == 'Windows' or platform.system() == 'Darwin': child.bind_all('<MouseWheel>', lambda e: _on_mousewheel(e, child)) child.bind_all('<Shift-MouseWheel>', lambda e: _on_shiftmouse(e, child)) else: child.bind_all('<Button-4>', lambda e: _on_mousewheel(e, child)) child.bind_all('<Button-5>', lambda e: _on_mousewheel(e, child)) child.bind_all('<Shift-Button-4>', lambda e: _on_shiftmouse(e, child)) child.bind_all('<Shift-Button-5>', lambda e: _on_shiftmouse(e, child)) def _unbound_to_mousewheel(event, widget): if platform.system() == 'Windows' or platform.system() == 'Darwin': widget.unbind_all('<MouseWheel>') widget.unbind_all('<Shift-MouseWheel>') else: widget.unbind_all('<Button-4>') widget.unbind_all('<Button-5>') widget.unbind_all('<Shift-Button-4>') widget.unbind_all('<Shift-Button-5>') def _on_mousewheel(event, widget): if platform.system() == 'Windows': widget.yview_scroll(-1 * int(event.delta / 120), 'units') elif platform.system() == 'Darwin': widget.yview_scroll(-1 * int(event.delta), 'units') else: if event.num == 4: widget.yview_scroll(-1, 'units') elif event.num == 5: widget.yview_scroll(1, 'units') def _on_shiftmouse(event, widget): if platform.system() == 'Windows': widget.xview_scroll(-1 * int(event.delta / 120), 'units') elif platform.system() == 'Darwin': widget.xview_scroll(-1 * int(event.delta), 'units') else: if event.num == 4: widget.xview_scroll(-1, 'units') elif event.num == 5: widget.xview_scroll(1, 'units') if __name__ == '__main__': vp_start_gui()
``` %run C:/Users/HSS/dd.py from sklearn.datasets import fetch_olivetti_faces faces_all = fetch_olivetti_faces() K = 20 # 20번 인물의 사진만 선택 faces = faces_all.images[faces_all.target == K] N = 2 M = 5 fig = plt.figure(figsize=(10, 5)) plt.subplots_adjust(top=1, bottom=0, hspace=0, wspace=0.05) for i in range(N): for j in range(M): k = i * M + j ax = fig.add_subplot(N, M, k+1) ax.imshow(faces[k], cmap=plt.cm.bone) ax.grid(False) ax.xaxis.set_ticks([]) ax.yaxis.set_ticks([]) plt.suptitle("올리베티 얼굴 사진") plt.tight_layout() plt.show() from sklearn.decomposition import PCA pca3 = PCA(n_components=2) X3 = faces_all.data[faces_all.target == K] W3 = pca3.fit_transform(X3) X32 = pca3.inverse_transform(W3) N = 2 M = 5 fig = plt.figure(figsize=(10, 5)) plt.subplots_adjust(top=1, bottom=0, hspace=0, wspace=0.05) for i in range(N): for j in range(M): k = i * M + j ax = fig.add_subplot(N, M, k+1) ax.imshow(X32[k].reshape(64, 64), cmap=plt.cm.bone) ax.grid(False) ax.xaxis.set_ticks([]) ax.yaxis.set_ticks([]) plt.suptitle("주성분 분석으로 근사화한 올리베티 얼굴 사진") plt.tight_layout() plt.show() plt.tight_layout() face_mean = pca3.mean_.reshape(64, 64) face_p1 = pca3.components_[0].reshape(64, 64) face_p2 = pca3.components_[1].reshape(64, 64) plt.subplot(131) plt.imshow(face_mean, cmap=plt.cm.bone) plt.grid(False) plt.xticks([]) plt.yticks([]) plt.title("평균 얼굴") plt.subplot(132) plt.imshow(face_p1, cmap=plt.cm.bone) plt.grid(False) plt.xticks([]) plt.yticks([]) plt.title("주성분 1") plt.subplot(133) plt.imshow(face_p2, cmap=plt.cm.bone) plt.grid(False) plt.xticks([]) plt.yticks([]) plt.title("주성분 2") plt.show() N = 2 M = 5 fig = plt.figure(figsize=(10, 5)) plt.subplots_adjust(top=1, bottom=0, hspace=0, wspace=0.05) for i in range(N): for j in range(M): k = i * M + j ax = fig.add_subplot(N, M, k+1) w = 1.5 * (k - 5) if k < 5 else 1.5 * (k - 4) ax.imshow(face_mean + w * face_p1, cmap=plt.cm.bone) ax.grid(False) ax.xaxis.set_ticks([]) ax.yaxis.set_ticks([]) plt.title("주성분1의 비중={}".format(w)) plt.suptitle("평균 얼굴에 주성분1을 더한 사진") plt.tight_layout() plt.show() ``` ### KNN - K개의 가까운 데이터 선별 - 장점 - 학습이 필요하지 않다 -> 속도에 경쟁성 - 단점 - 고차원 데이터의 경우 연산량이 많다 ``` from sklearn.datasets import load_iris iris = load_iris() from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size = 0.3, random_state = 13, stratify = iris.target) from sklearn.neighbors import KNeighborsClassifier knn = KNeighborsClassifier(n_neighbors = 5) knn.fit(x_train, y_train) from sklearn.metrics import accuracy_score pred = knn.predict(x_test) print(accuracy_score(y_test, pred)) from sklearn.metrics import classification_report, confusion_matrix print(confusion_matrix(y_test, pred)) print(classification_report(y_test, pred)) ```
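The bullet points above describe KNN as simply selecting the K nearest training samples, so it can help to look at those neighbours directly. The sketch below is not part of the original notebook; it reuses the `knn`, `x_train`, `x_test` and `y_train` objects from the cells above.

```
# Inspect the 5 nearest training samples for the first three test flowers.
# kneighbors() returns their distances and indices; the majority label among
# those neighbours is what predict() outputs.
distances, indices = knn.kneighbors(x_test[:3])
print(distances.round(3))        # distances to the 5 nearest training samples
print(y_train[indices])          # neighbour labels; the majority vote is the prediction
print(knn.predict(x_test[:3]))   # should match the majority vote above
```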
<p><font size="6"><b> CASE - Bike count data</b></font></p> > *DS Data manipulation, analysis and visualization in Python* > *May/June, 2021* > > *© 2021, Joris Van den Bossche and Stijn Van Hoey (<mailto:[email protected]>, <mailto:[email protected]>). Licensed under [CC BY 4.0 Creative Commons](http://creativecommons.org/licenses/by/4.0/)* --- <img src="https://static.nieuwsblad.be/Assets/Images_Upload/2014/04/17/57b8f34e-5042-11e2-80ee-5d1d7b74455f_original.jpg.h380.jpg.568.jpg?maxheight=460&maxwidth=638&scale=both"> In this case study, we will make use of the openly available bike count data of the city of Ghent (Belgium). At the Coupure Links, next to the Faculty of Bioscience Engineering, a counter keeps track of the number of passing cyclists in both directions. ``` import pandas as pd import matplotlib.pyplot as plt plt.style.use('seaborn-whitegrid') ``` # Reading and processing the data ## Read csv data The data were previously available on the open data portal of the city, and we downloaded them in the `CSV` format, and provided the original file as `data/fietstellingencoupure.csv`. This dataset contains the historical data of the bike counters, and consists of the following columns: - The first column `datum` is the date, in `dd/mm/yy` format - The second column `tijd` is the time of the day, in `hh:mm` format - The third and fourth column `ri Centrum` and `ri Mariakerke` are the counts at that point in time (counts between this timestamp and the previous) <div class="alert alert-success"> **EXERCISE** - Read the csv file from the url into a DataFrame `df`, the delimiter of the data is `;` - Inspect the first and last 5 rows, and check the number of observations - Inspect the data types of the different columns <details><summary>Hints</summary> - With the cursor on a function, you can combine the SHIFT + TAB keystrokes to see the documentation of a function. - Both the `sep` and `delimiter` argument will work to define the delimiter. - Methods like `head`/`tail` have round brackets `()`, attributes like `dtypes` not. </details> </div> ``` df = pd.read_csv("data/fietstellingencoupure.csv", sep=';') df.head() df.tail() len(df) df.dtypes ``` ## Data processing As explained above, the first and second column (respectively `datum` and `tijd`) indicate the date and hour of the day. To obtain a time series, we have to combine those two columns into one series of actual timestamp values. <div class="alert alert-success"> **EXERCISE** Pre-process the data: * Combine the 'datum' and 'tijd' columns into one Pandas Series of string datetime values, call this new variable `combined`. * Parse the string datetime values to `datetime` objects. * Set the resulting `datetime` column as the index of the `df` DataFrame. * Remove the original 'datum' and 'tijd' columns using the `drop` method, and call the new dataframe `df2`. * Rename the columns in the DataFrame 'ri Centrum', 'ri Mariakerke' to resp. 'direction_centre', 'direction_mariakerke' using the `rename` method. <details><summary>Hints</summary> - Concatenating strings can be done with the addition operation `+`. - When converting strings to a `datetime` with `pd.to_datetime`, specifying the format will make the conversion a lot faster. - `drop` can remove both rows and columns using the names of the index or column name. Make sure to define `columns=` argument to remove columns. - `rename` can be used for both rows/columns. It needs a dictionary with the current names as keys and the new names as values. 
</details> ``` combined = df['datum'] + ' ' + df['tijd'] combined.head() df.index = pd.to_datetime(combined, format="%d/%m/%Y %H:%M") df2 = df.drop(columns=['datum', 'tijd']) df2 = df2.rename(columns={'ri Centrum': 'direction_centre', 'ri Mariakerke':'direction_mariakerke'}) df2.head() ``` Having the data available with an interpreted `datetime`, provides us the possibility of having time aware plotting: ``` fig, ax = plt.subplots(figsize=(10, 6)) df.plot(colormap='coolwarm', ax=ax) ``` <div class="alert alert-warning"> <b>Remark</b>: Interpretation of the dates with and without predefined date format. </div> When we just want to interpret the dates, without specifying how the dates are formatted, Pandas makes an attempt as good as possible: ``` combined = df['datum'] + ' ' + df['tijd'] %timeit -n 1 -r 1 pd.to_datetime(combined, dayfirst=True) ``` However, when we already know the format of the dates (and if this is consistent throughout the full dataset), we can use this information to interpret the dates: ``` %timeit pd.to_datetime(combined, format="%d/%m/%Y %H:%M") ``` <div class="alert alert-info"> <b>Remember</b>: Whenever possible, specify the date format to interpret the dates to `datetime` values! </div> ### Write the dataset cleaning as a function In order to make it easier to reuse the code for the pre-processing we have implemented, let's convert the code to a Python function: <div class="alert alert-success"> **EXERCISE** Write a function `process_bike_count_data(df)` that performs the processing steps as done above for an input Pandas DataFrame and returns the updated DataFrame. <details><summary>Hints</summary> - Want to know more about proper documenting your Python functions? Check out the official guide of [numpydoc](https://numpydoc.readthedocs.io/en/latest/format.html). The `Parameters` and `Returns` sections should always be explained. </details> ``` def process_bike_count_data(df): """Process the provided dataframe: parse datetimes and rename columns. Parameters ---------- df : pandas.DataFrame DataFrame as read from the raw `fietstellingen`, containing the `datum`, `tijd`, `ri Centrum` and `ri Mariakerke` columns. Returns ------- df2 : pandas.DataFrame DataFrame with the datetime info as index and the `direction_centre` and `direction_mariakerke` columns with the counts. """ df.index = pd.to_datetime(df['datum'] + ' ' + df['tijd'], format="%d/%m/%Y %H:%M") df2 = df.drop(columns=['datum', 'tijd']) df2 = df2.rename(columns={'ri Centrum': 'direction_centre', 'ri Mariakerke':'direction_mariakerke'}) return df2 df_raw = pd.read_csv("data/fietstellingencoupure.csv", sep=';') df_preprocessed = process_bike_count_data(df_raw) df_preprocessed.head() ``` ### Store our collected dataset as an interim data product As we finished our data-collection step, we want to save this result as an interim data output of our small investigation. As such, we do not have to re-download all the files each time something went wrong, but can restart from our interim step. ``` df_preprocessed.to_csv("bike_count_interim.csv") ``` ## Data exploration and analysis We now have a cleaned-up dataset of the bike counts at Coupure Links in Ghent (Belgium). Next, we want to get an impression of the characteristics and properties of the data ### Load the interim data Reading the file in from the interim file (when you want to rerun the whole analysis on the updated online data, you would comment out this cell...) 
``` df = pd.read_csv("bike_count_interim.csv", index_col=0, parse_dates=True) ``` ### Count interval verification The number of bikers are counted for intervals of approximately 15 minutes. But let's check if this is indeed the case. Calculate the difference between each of the consecutive values of the index. We can use the `Series.diff()` method: ``` pd.Series(df.index).diff() ``` The count of the possible intervals is of interest: ``` pd.Series(df.index).diff().value_counts() ``` There are a few records that are not exactly 15min. But given it are only a few ones, we will ignore this for the current case study and just keep them for this explorative study. Bonus question: do you know where the values of `-1 days +23:15:01` and `01:15:00` are coming from? ``` df.describe() ``` ### Quiet periods <div class="alert alert-success"> **EXERCISE** Create a new Pandas Series `df_both` which contains the sum of the counts of both directions. <details><summary>Hints</summary> - Check the purpose of the `axis` argument of the `sum` method. </details> ``` df_both = df.sum(axis=1) df_both ``` <div class="alert alert-success"> **EXERCISE** Using the `df_both` from the previous exercise, create a new Series `df_quiet` which contains only those intervals for which less than 5 cyclists passed in both directions combined <details><summary>Hints</summary> - Use the `[]` to select data. You can use conditions (so-called _boolean indexing_) returning True/False inside the brackets. </details> ``` df_quiet = df_both[df_both < 5] ``` <div class="alert alert-success"> **EXERCISE** Using the original data `df`, select only the intervals for which less than 3 cyclists passed in one or the other direction. Hence, less than 3 cyclists towards the center or less than 3 cyclists towards Mariakerke. <details><summary>Hints</summary> - To combine conditions use the `|` (or) or the `&` (and) operators. - Make sure to use `()` around each individual condition. </details> ``` df[(df['direction_centre'] < 3) | (df['direction_mariakerke'] < 3)] ``` ### Count statistics <div class="alert alert-success"> **EXERCISE** What is the average number of bikers passing each 15 min? <details><summary>Hints</summary> - As the time series is already 15min level, this is just the same as taking the mean. </details> ``` df.mean() ``` <div class="alert alert-success"> **EXERCISE** What is the average number of bikers passing each hour? <details><summary>Hints</summary> - Use `resample` to first calculate the number of bikers passing each hour. - `resample` requires an aggregation function that defines how to combine the values within each group (in this case all values within each hour). </details> ``` df.resample('H').sum().mean() ``` <div class="alert alert-success"> **EXERCISE** What are the 10 highest peak values observed during any of the intervals for the direction towards the center of Ghent? <details><summary>Hints</summary> - Pandas provides the `nsmallest` and `nlargest` methods to derive N smallest/largest values of a column. </details> ``` df['direction_centre'].nlargest(10) # alternative: # df['direction_centre'].sort_values(ascending=False).head(10) ``` <div class="alert alert-success"> **EXERCISE** What is the maximum number of cyclist that passed on a single day calculated on both directions combined? <details><summary>Hints</summary> - Combine both directions by taking the sum. - Next, `resample` to daily values - Get the maximum value or ask for the n largest to see the dates as well. 
</details> ``` df_both = df.sum(axis=1) df_daily = df_both.resample('D').sum() df_daily.max() df_daily.nlargest(10) ``` The high number of bikers passing on 2013-06-05 was not by coincidence: http://www.nieuwsblad.be/cnt/dmf20130605_022 ;-) ### Trends as function of time <div class="alert alert-success"> **EXERCISE** How does the long-term trend look like? Calculate monthly sums and plot the result. <details><summary>Hints</summary> - The symbol for monthly resampling is `M`. - Use the `plot` method of Pandas, which will generate a line plot of each numeric column by default. </details> ``` df_monthly = df.resample('M').sum() df_monthly.plot() ``` <div class="alert alert-success"> **EXERCISE** Let's have a look at some short term patterns. For the data of the first 3 weeks of January 2014, calculate the hourly counts and visualize them. <details><summary>Hints</summary> - Slicing is done using `[]`, you can use string representation of dates to select from a `datetime` index: e.g. `'2010-01-01':'2020-12-31'` </details> ``` df_hourly = df.resample('H').sum() df_hourly.head() df_hourly['2014-01-01':'2014-01-20'].plot() ``` **New Year's Eve 2013-2014** <div class="alert alert-success"> **EXERCISE** - Select a subset of the dataset from 2013-12-31 12:00:00 until 2014-01-01 12:00:00 and assign the result to a new variable `newyear` - Plot the selected data `newyear`. - Use a `rolling` function with a window of 10 values (check documentation of the function) to smooth the data of this period and make a plot of the smoothed version. <details><summary>Hints</summary> - Just like `resample`, `rolling` requires an aggregate statistic (e.g. mean, median,...) to combine the values within the window. </details> ``` newyear = df["2013-12-31 12:00:00": "2014-01-01 12:00:00"] newyear.plot() newyear.rolling(10, center=True).mean().plot(linewidth=2) ``` A more advanced usage of Matplotlib to create a combined plot: ``` # A more in-detail plotting version of the graph. fig, ax = plt.subplots() newyear.plot(ax=ax, color=['LightGreen', 'LightBlue'], legend=False, rot=0) newyear.rolling(10, center=True).mean().plot(linewidth=2, ax=ax, color=['DarkGreen', 'DarkBlue'], rot=0) ax.set_xlabel('') ax.set_ylabel('Cyclists count') ``` --- ## The power of `groupby`... Looking at the data in the above exercises, there seems to be clearly a: - daily pattern - weekly pattern - yearly pattern Such patterns can easily be calculated and visualized in pandas using the `DatetimeIndex` attributes `dayofweek` combined with `groupby` functionality. Below a taste of the possibilities, and we will learn about this in the proceeding notebooks: **Weekly pattern**: ``` df_daily = df.resample('D').sum() df_daily.groupby(df_daily.index.dayofweek).mean().plot(kind='bar') ``` **Daily pattern:** ``` df_hourly.groupby(df_hourly.index.hour).mean().plot() ``` So the daily pattern is clearly different for both directions. In the morning more people go north, in the evening more people go south. The morning peak is also more condensed. **Monthly pattern** ``` df_monthly = df.resample('M').sum() from calendar import month_abbr ax = df_monthly.groupby(df_monthly.index.month).mean().plot() ax.set_ylim(0) xlabels = ax.set_xticklabels(list(month_abbr)[0::2]) #too lazy to write the month values yourself... ``` ## Acknowledgements Thanks to the [city of Ghent](https://data.stad.gent/) for opening their data
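As a small extension of the `groupby` section above, the daily and weekly patterns can also be combined by grouping on two `DatetimeIndex` attributes at once. This is just a sketch, reusing the `df_hourly` data computed earlier.

```
# Average hourly profile per day of the week: group on (dayofweek, hour) and
# unstack the weekday level so that each weekday becomes one line in the plot.
pattern = df_hourly.groupby([df_hourly.index.dayofweek, df_hourly.index.hour]).mean()
pattern['direction_centre'].unstack(level=0).plot(figsize=(10, 6))
```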
``` import numpy as np import pandas as pd import matplotlib.pylab as plt %matplotlib inline ``` ### Read data ``` train = pd.read_json('../data/train.json') del train['id'] train.shape train.head() "Number of NaN angle data: {}".format( len(train[train.inc_angle == "na"])) ``` ``` train = train[train['inc_angle'] != "na"] train['inc_angle'] = train['inc_angle'].astype('float32') band_1 = np.asarray(train.iloc[0]['band_1']).reshape((75, 75)) band_2 = np.asarray(train.iloc[0]['band_2']).reshape((75, 75)) plt.imshow(band_1) plt.imshow(band_2) ``` ### Display correlation ``` print "Max angle: {}".format(train['inc_angle'].max()) print "Min angle: {}".format(train['inc_angle'].min()) print "Mean angle: {}".format(train['inc_angle'].mean()) print "Std angle: {}".format(train['inc_angle'].std()) train['band_1_mean'] = train['band_1'].apply(lambda x: np.asarray(x).mean()) train['band_1_std'] = train['band_1'].apply(lambda x: np.asarray(x).std()) train['band_2_mean'] = train['band_2'].apply(lambda x: np.asarray(x).mean()) train['band_2_std'] = train['band_2'].apply(lambda x: np.asarray(x).std()) train_corr = train[['inc_angle', 'band_1_mean', 'band_2_mean', 'band_1_std', 'band_2_std']] train_corr.corr() ``` Linear regression of mean figure value with inc_angle ``` reg1 = np.polyfit(train['inc_angle'], train['band_1_mean'], 1) reg2 = np.polyfit(train['inc_angle'], train['band_2_mean'], 1) ``` Remove dependence on the angle ``` train['band_1_mean_detrended'] = train['band_1_mean'] - reg1[0] * train['inc_angle'] - reg1[1] train['band_2_mean_detrended'] = train['band_2_mean'] - reg2[0] * train['inc_angle'] - reg2[1] train_corr = train[['inc_angle', 'band_1_mean', 'band_2_mean', 'band_1_mean_detrended', 'band_2_mean_detrended']] train_corr.corr() ``` Then remove dependence on the angle from the figures ``` train['band_1'][:3] def get_detrended_band(band, value): band_arr = np.asarray(band).astype(np.float32) return np.array(band_arr - value).tolist() train['band_1_detrended'] = train.apply( lambda row: get_detrended_band(row['band_1'], reg1[0] * row['inc_angle']+reg1[1]), axis=1) train['band_2_detrended'] = train.apply( lambda row: get_detrended_band(row['band_2'], reg2[0] * row['inc_angle']+reg2[1]), axis=1) train['band_1_detrended'][:3] band_1_detrended = np.asarray(train.iloc[0]['band_1_detrended']).reshape((75, 75)) band_2_detrended = np.asarray(train.iloc[0]['band_2_detrended']).reshape((75, 75)) plt.imshow(band_1_detrended) plt.imshow(band_1) plt.imshow(band_2_detrended) plt.imshow(band_2) ```
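Since the same angle correction will have to be applied to any held-out or test images, it may be convenient to wrap the steps above into one helper. This is only a sketch: it reuses `get_detrended_band`, `reg1` and `reg2` from the cells above, and the function name is made up.

```
def detrend_bands(df, reg1, reg2):
    # Subtract the fitted linear angle trend from both bands of a dataframe
    out = df.copy()
    out['band_1_detrended'] = out.apply(
        lambda row: get_detrended_band(row['band_1'], reg1[0] * row['inc_angle'] + reg1[1]), axis=1)
    out['band_2_detrended'] = out.apply(
        lambda row: get_detrended_band(row['band_2'], reg2[0] * row['inc_angle'] + reg2[1]), axis=1)
    return out

train_detrended = detrend_bands(train, reg1, reg2)
```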
## Introduction In this exercise, we will motivate the least-squares problem for linear regression. The point of this exercise is to recall some relevant ideas/notation from calculus and linear algebra and to practice some basic calculations that are common in the optimization world. We provide problems <i>and</i> their solutions, so that this document is suitable for self study. But, we encourage readers to try the exercises before looking at the answers. Feel free to do the math exercises on scrap paper---no need to learn how to type up equations! ## Problem setup Suppose we wish to predict the height of a plant grown for a month as a function of several factors: <ul> <li> The amount of water we give the plant per week, in liters ($x_1$)</li> <li> The amount of fertilizer we mix with the soil, in cubic inches ($x_2$)</li> <li> The distance of the plant to the window, in feet ($x_3$)</li> <li> The height of the plant at the beginning of the month, in inches ($x_4$)</li> </ul> We can use a <i>vector</i> $x\in\mathbb R^4$ to represent a given plant. Here, $\mathbb R^4$ denotes the set of 4-dimensional vectors: $x=(x_1,x_2,x_3,x_4).$ We will assume in our course that vectors are <i>column vectors</i>, meaning that points in $\mathbb R^k$ can be written as $k\times 1$ matrices: $$x=\begin{pmatrix} x_1\\x_2\\x_3\\x_4 \end{pmatrix} = \begin{pmatrix} \textrm{water} \\ \textrm{fertilizer} \\ \textrm{window distance} \\ \textrm{initial height}\end{pmatrix}.$$ ## Linear regression Let's say we have a collection of $n$ plants. Each plant $p$, where $p\in\{1,\ldots,n\}$, experiences its own factors described in the previous section, which we can store in a vector $x^{(p)}\in\mathbb R^4$. We grow each plant for a month and measure its height $h^{(p)}\in \mathbb R$. Our goal is to develop a <i>model</i> for predicting $h$ from $x$, that is, for predicting the height of the plant after a month of growth from the factors we identified above. In <i>linear regression</i>, we assume that $h$ is well-approximated by a weighted average of the elements of $x$. That is, we think there is an unknown <i>weight vector</i> $w\in\mathbb R^4$ such that $$h\approx w_1x_1+w_2x_2+w_3x_3+w_4 x_4 = \sum_{k=1}^4 w_k x_k= w\cdot x.$$ Make sure you understand the three equivalent ways to denote the sum on the right of the $\approx$; the fourth is a <i>dot product</i> between vectors $x,w\in\mathbb R^4$. Here's a fourth way to compute the same value, using a matrix transpose: $$w\cdot x=w^\top x = \begin{pmatrix} w_1 \\ w_2 \\ w_3 \\ w_4 \end{pmatrix}^\top \begin{pmatrix} x_1 \\ x_2 \\ x_3 \\ x_4 \end{pmatrix} =\begin{pmatrix} w_1 & w_2 & w_3 & w_4 \end{pmatrix} \begin{pmatrix} x_1 \\ x_2 \\ x_3 \\ x_4 \end{pmatrix}.$$ ## Least-squares To predict the heights of plants in the future, we would like to estimate $w$ given our data $\{(x^{(1)},h^{(1)}),\ldots,(x^{(n)},h^{(n)})\}.$ A good choice of $w$ will have the property that $(w\cdot x^{(p)}-h^{(p)})^2$ is close to zero for all $p\in\{1,\ldots,n\}$. 
Summing over our data points, a reasonable <i>objective function</i> measuring the quality of a potential choice of $w\in\mathbb R^4$ is the following: $$f(w):=\frac{1}{2}\sum_{p=1}^n (w\cdot x^{(p)}-h^{(p)})^2$$ <b>Exercise 1:</b> Write an expression for the partial derivative $\frac{\partial f}{\partial w_i}.$ <b>Solution 1:</b> $$\frac{\partial f}{\partial w_i} = \sum_{p=1}^n (w\cdot x^{(p)}-h^{(p)})x^{(p)}_i.$$ <b>Exercise 2:</b> Write an expression for the gradient $\nabla_w f(w)$, that is, the gradient of the function $f$ with respect to the variable $w$. <b>Solution 2:</b> $$\nabla_w f = \sum_{p=1}^n (w\cdot x^{(p)}-h^{(p)})x^{(p)}$$ ## Matrix notation We can construct a matrix $X\in\mathbb R^{n\times 4}$ containing all of our data points in its rows: $$X:=\begin{pmatrix} x^{(1)}_1 & x^{(1)}_2 & x^{(1)}_3 & x^{(1)}_4\\ x^{(2)}_1 & x^{(2)}_2 & x^{(2)}_3 & x^{(2)}_4 \\ \vdots & \vdots & \vdots & \vdots\\ x^{(n)}_1 & x^{(n)}_2 & x^{(n)}_3 & x^{(n)}_4 \end{pmatrix}.$$ Recall the vector norm: $$\|v\|_2:=\sqrt{v_1^2+v_2^2+\cdots+v_n^2}.$$ The following exercise is intended to help you remember linear algebra, including matrix-vector products, transposes, and vector norms. It may help to do some examples on paper. <b>Exercise 3:</b> Justify the expression $f(w)=\frac{1}{2} \|Xw - h\|_2^2$. Here, $h\in\mathbb R^n$ is the vector of $h$ values for all the plants. <b>Exercise 4:</b> Justify the expression $\nabla_w f(w) = X^\top(Xw - h).$ ## Coding Now, let's code this up. Run the following segment of code to load the appropriate Python libraries: ``` import numpy as np from numpy.linalg import norm from numpy.random import rand from matplotlib import pyplot as plt ``` <b>Exercise 5:</b> Implement $f(w)$ as a Python function; we will use NumPy to store matrices and vectors. ``` def f(w,X,h): return 0.5*norm(X.dot(w)-h)**2 ``` <b>Exercise 6:</b> Implement the gradient $\nabla_w f$ as a Python function. ``` def gradf(w,X,h): return X.transpose().dot(X.dot(w) - h) ``` We'll implement the most popular optimization algorithm to find a $w$ that minimizes $f(w)$, known as <i>gradient descent</i>. First, do some reading online about this technique; there are <i>many</i> great writeups of gradient descent, such as this one: https://towardsdatascience.com/machine-learning-101-an-intuitive-introduction-to-gradient-descent-366b77b52645 In the end, gradient descent updates an estimate of the best $w$ using the following formula: $$w\gets w - \eta \nabla_w f(w),$$ where $\eta>0$ is the <i>learning rate.</i> <b>Exercise 7:</b> Implement the gradient descent step in the code below. If it's implemented correctly, the objective value should go to near 0 in the plot. ``` # Synthesize a dataset of size 100 X = rand(100,4) w_groundtruth = np.array([[1],[2],[3],[4]]) y = X.dot(w_groundtruth) + 0.01*rand(100,1) # Parameters of gradient descent: Feel free to play with these! eta = 0.01 n_steps = 100 # Initialize a guess for the solution "w" w = np.zeros((4,1)) # We will use the following vector to keep track of objective values objective_values = np.zeros((n_steps,1)) for i in range(n_steps): w -= eta*gradf(w,X,y) # gradient descent step objective_values[i] = f(w,X,y) # store objective value # Plot the objective as a function of the iteration plt.plot(objective_values) plt.xlabel('Iteration counter') plt.ylabel('Objective function value') print(w) ``` Congratulations! You have implemented your first optimization problem and solver. 
Notice that the code also prints out the estimate you get from gradient descent of the unknown variable $w$, which should satisfy $w\approx(1,2,3,4).$

<b>Exercise 8:</b> You have implemented the basics of linear regression using least-squares, including a simple optimization algorithm. Discuss advantages and disadvantages of this technique for regression.

<b>Solution 8:</b> Linear regression is an extremely simplistic model. It is relatively straightforward to optimize for $w$, using the technique above, direct linear system solvers, or other techniques (stochastic gradient descent, conjugate gradients, QR factorization, ...). On the other hand, many relationships are <i>nonlinear</i>. As a simple example, this model can happily predict negative plant heights, which makes no physical sense!

## SGD

In stochastic gradient descent (SGD) we replace the full gradient, which sums over all $n$ data points, by the gradient of a single randomly chosen data point: each step is much cheaper, at the price of a noisier descent direction.

```
def stochGradf(w,X,h):
    # pick a random training data point
    (n,d) = X.shape
    idx = np.random.randint(n)
    x = X[idx,:].reshape(1,d)   # reshape into a proper row vector
    y = x*(x.dot(w)-h[idx])     # the stochastic gradient for this single data point
    return y.T                  # transpose so that a (d,1) column vector is returned

# Synthesize a dataset of size n x d
n = 100
d = 4
X = rand(n,d)
w_groundtruth = np.arange(d).reshape(d,1)
y = X.dot(w_groundtruth) + 0.01*rand(n,1)

# Parameters of stochastic gradient descent: Feel free to play with these!
eta = 0.007
maxiter = 5000

# Initialize a guess for the solution "w"
w = np.zeros((d,1))

# We will use the following vector to keep track of objective values
objective_values = np.zeros((maxiter,1))

for i in range(maxiter):
    w -= eta*stochGradf(w,X,y)       # stochastic gradient descent step
    objective_values[i] = f(w,X,y)   # store objective value

# Plot the objective as a function of the iteration
plt.plot(objective_values)
plt.xlabel('# of training data used')
plt.ylabel('Objective function value')

print(w)
print(w_groundtruth)
```
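As mentioned in Solution 8, a direct linear system solver can be used instead of (stochastic) gradient descent: setting the gradient from Exercise 2 to zero gives the normal equations $X^\top X w = X^\top h$. The sketch below is only a sanity check, reusing `X`, `y` and `f` from the cells above and NumPy's built-in least-squares solver.

```
# Solve the least-squares problem directly and compare with the ground truth
# and with the SGD estimate printed above.
w_direct, residuals, rank, sv = np.linalg.lstsq(X, y, rcond=None)
print(w_direct)           # should be very close to w_groundtruth
print(f(w_direct, X, y))  # objective value at the direct solution
```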
### JCU MA5851 <p style="line-height: 1.5; font-size:14pt"> Student: Sacha Schwab <br> Location: Zurich, Switzerland </p> # Assessment 3 - Code for Part Two (WebCrawling) ``` from bs4 import BeautifulSoup import urllib3 import re import requests import datetime import pandas as pd import numpy as np from datetime import date import sys # Dataframe to hold the data columns = ['url', 'title', 'text', 'date'] df = pd.DataFrame(columns=columns) dir_path = 'data/' def get_response_code(url): print("Loading: " + url) status_codes = {} page = requests.get(url) status_code = page.status_code timestamp = datetime.datetime.now() status = url + " Timestamp: " + str(timestamp) + " Code: " + str(status_code) return(status_code, page) def get_soup(page): soup = BeautifulSoup(page.content, 'html.parser') return(soup) def get_title(soup): # Extract the title if soup.find('header', class_='caas-title-wrapper'): title = soup.find('header', class_='caas-title-wrapper').text.strip() return(title) else: return('') def get_date_time(soup): # Extract the date if soup.find('div', class_='caas-attr-time-style'): date = soup.find('div', class_='caas-attr-time-style').text.split("·")[0] return(date) else: return('') def get_text(soup): # Extract the article text art_text = soup.find('div', class_='caas-body').text return(art_text) def crawl_new_articles(): # Read the raw articles data df = pd.read_csv(dir_path + 'raw_data.csv') # Backup df.to_csv(dir_path + 'raw_data_backup' + str(date.today()) + '.csv') # Erase NaNs df = df.fillna('') # Filter the urls that have not yet been crawled df_todo = df[df['text'] == ''] # Loop through urls to crawl and get the data i = 0 for index, row in df_todo.iterrows(): # Dict to hold the sample data sample = {} # Get response code response_code, page = get_response_code(row['url']) print(str(response_code)) if response_code == 200: # Get the soup soup = get_soup(page) title = get_title(soup) if (len(title) > 0): df.loc[index, 'title'] = title text = get_text(soup) if len(text) > 0: df.loc[index, 'text'] = text df.loc[index, 'date_time'] = get_date_time(soup) else: print('dropping row') df = df.drop(index = index) else: df = df.drop(index = index) else: print('dropping') df = df.drop(index = index) # TEST df.to_csv(dir_path + 'raw_data.csv', index = False) return df df = crawl_new_articles() url = 'https://finance.yahoo.com/news/why-ethereum-is-gaining-traction-in-cryptocurrencys-horse-race-181618921.html' page = requests.get(url) soup = BeautifulSoup(page.content, 'html.parser') soup if soup.find('header', class_='caas-title-wrapper'): print('ok') s = soup.find('div', class_='caas-body') print(s.text) df = pd.read_csv(dir_path + 'raw_data.csv') df.loc[0, 'title'] = 'some' df.head() def get_yahoo_crypto_news(): # Yahoo url yahoo_url = "https://finance.yahoo.com/topic/crypto/" # The class for the titles we are interested in title_class = 'mega-item-header-link' # Yahoo url prefix prefix = 'https://finance.yahoo.com/' # Get the soup and the status of the response soup, status = get_page_content(yahoo_url) # Loop through html items and extract the data article_items = soup.find_all('li', class_='js-stream-content Pos(r)') if article_items: i = 0 for item in article_items: sample = {} item_title = item.find("a", class_=soup_class) if item_title: sample['title'] = item_title.text.strip() a_class = item.find("a", class_="js-content-viewer", href=True) if a_class: url = prefix + a_class['href'] if url: sample['url'] = url # Get the full article text from url art_soup, art_status = 
get_page_content(url)
                    art_text = ''
                    for p in art_soup.find_all('p'):
                        art_text = art_text + p.text
                    #print(art_text)
                    sample['text'] = art_text
            print(sample)
            if (i == 0):
                break   # i is never incremented, so this exits after the first article
    else:
        print('None')

get_yahoo_crypto_news()
```

## Cryptocurrencies list crawler

```
url = 'https://finance.yahoo.com/cryptocurrencies/?count=25&offset=375'
soup = ''
page = requests.get(url, timeout=1)
soup = get_soup(page)
for s in soup.find_all(attrs={"aria-label": "Symbol"}):
    print(s.text)
    print(s.find_next_sibling().text)

from selenium import webdriver

browser = webdriver.PhantomJS()
browser.get(url)
html = browser.page_source

# The same page fetched with Firefox and an implicit wait instead of PhantomJS
driver = webdriver.Firefox()
driver.implicitly_wait(10)
driver.get(url)
html = driver.page_source
```
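The `count`/`offset` query parameters in the URL above suggest that the listing is paged, so collecting the whole table could look roughly like the sketch below. It reuses `get_soup` from the earlier cells; the page size of 25 and the empty-page stopping condition are assumptions, and in practice the page may need a JavaScript-capable browser (hence the Selenium attempt above).

```
symbols = []
offset = 0
while True:
    page_url = 'https://finance.yahoo.com/cryptocurrencies/?count=25&offset=' + str(offset)
    page_soup = get_soup(requests.get(page_url, timeout=10))
    cells = page_soup.find_all(attrs={"aria-label": "Symbol"})
    if not cells:
        break                                   # no more rows: stop paging
    for cell in cells:
        symbols.append({'symbol': cell.text,
                        'name': cell.find_next_sibling().text})
    offset += 25

crypto_df = pd.DataFrame(symbols)
crypto_df.head()
```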
# Densely Connected Networks (DenseNet) ResNet significantly changed the view of how to parametrize the functions in deep networks. DenseNet is to some extent the logical extension of this. To understand how to arrive at it, let's take a small detour to theory. Recall the Taylor expansion for functions. For scalars it can be written as $$f(x) = f(0) + f'(x) x + \frac{1}{2} f''(x) x^2 + \frac{1}{6} f'''(x) x^3 + o(x^3)$$ ## Function Decomposition The key point is that it decomposes the function into increasingly higher order terms. In a similar vein, ResNet decomposes functions into $$f(\mathbf{x}) = \mathbf{x} + g(\mathbf{x})$$ That is, ResNet decomposes $f$ into a simple linear term and a more complex nonlinear one. What if we want to go beyond two terms? A solution was proposed by :cite:`Huang.Liu.Van-Der-Maaten.ea.2017` in the form of DenseNet, an architecture that reported record performance on the ImageNet dataset. ![The main difference between ResNet (left) and DenseNet (right) in cross-layer connections: use of addition and use of concatenation. ](../img/densenet-block.svg) The key difference between ResNet and DenseNet is that in the latter case outputs are *concatenated* rather than added. As a result we perform a mapping from $\mathbf{x}$ to its values after applying an increasingly complex sequence of functions. $$\mathbf{x} \to \left[\mathbf{x}, f_1(\mathbf{x}), f_2(\mathbf{x}, f_1(\mathbf{x})), f_3(\mathbf{x}, f_1(\mathbf{x}), f_2(\mathbf{x}, f_1(\mathbf{x})), \ldots\right]$$ In the end, all these functions are combined in an MLP to reduce the number of features again. In terms of implementation this is quite simple - rather than adding terms, we concatenate them. The name DenseNet arises from the fact that the dependency graph between variables becomes quite dense. The last layer of such a chain is densely connected to all previous layers. The main components that compose a DenseNet are dense blocks and transition layers. The former defines how the inputs and outputs are concatenated, while the latter controls the number of channels so that it is not too large. ![Dense connections in DenseNet](../img/densenet.svg) ## Dense Blocks DenseNet uses the modified "batch normalization, activation, and convolution" architecture of ResNet (see the exercise in :numref:`chapter_resnet`). First, we implement this architecture in the `conv_block` function. ``` import d2l from mxnet import gluon, np, npx from mxnet.gluon import nn npx.set_np() def conv_block(num_channels): blk = nn.Sequential() blk.add(nn.BatchNorm(), nn.Activation('relu'), nn.Conv2D(num_channels, kernel_size=3, padding=1)) return blk ``` A dense block consists of multiple `conv_block` units, each using the same number of output channels. In the forward computation, however, we concatenate the input and output of each block on the channel dimension. ``` class DenseBlock(nn.Block): def __init__(self, num_convs, num_channels, **kwargs): super(DenseBlock, self).__init__(**kwargs) self.net = nn.Sequential() for _ in range(num_convs): self.net.add(conv_block(num_channels)) def forward(self, X): for blk in self.net: Y = blk(X) # Concatenate the input and output of each block on the channel # dimension X = np.concatenate((X, Y), axis=1) return X ``` In the following example, we define a convolution block with two blocks of 10 output channels. When using an input with 3 channels, we will get an output with the $3+2\times 10=23$ channels. 
The number of convolution block channels controls the increase in the number of output channels relative to the number of input channels. This is also referred to as the growth rate. ``` blk = DenseBlock(2, 10) blk.initialize() X = np.random.uniform(size=(4, 3, 8, 8)) Y = blk(X) Y.shape ``` ## Transition Layers Since each dense block will increase the number of channels, adding too many of them will lead to an excessively complex model. A transition layer is used to control the complexity of the model. It reduces the number of channels by using the $1\times 1$ convolutional layer and halves the height and width of the average pooling layer with a stride of 2, further reducing the complexity of the model. ``` def transition_block(num_channels): blk = nn.Sequential() blk.add(nn.BatchNorm(), nn.Activation('relu'), nn.Conv2D(num_channels, kernel_size=1), nn.AvgPool2D(pool_size=2, strides=2)) return blk ``` Apply a transition layer with 10 channels to the output of the dense block in the previous example. This reduces the number of output channels to 10, and halves the height and width. ``` blk = transition_block(10) blk.initialize() blk(Y).shape ``` ## DenseNet Model Next, we will construct a DenseNet model. DenseNet first uses the same single convolutional layer and maximum pooling layer as ResNet. ``` net = nn.Sequential() net.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3), nn.BatchNorm(), nn.Activation('relu'), nn.MaxPool2D(pool_size=3, strides=2, padding=1)) ``` Then, similar to the four residual blocks that ResNet uses, DenseNet uses four dense blocks. Similar to ResNet, we can set the number of convolutional layers used in each dense block. Here, we set it to 4, consistent with the ResNet-18 in the previous section. Furthermore, we set the number of channels (i.e. growth rate) for the convolutional layers in the dense block to 32, so 128 channels will be added to each dense block. In ResNet, the height and width are reduced between each module by a residual block with a stride of 2. Here, we use the transition layer to halve the height and width and halve the number of channels. ``` # Num_channels: the current number of channels num_channels, growth_rate = 64, 32 num_convs_in_dense_blocks = [4, 4, 4, 4] for i, num_convs in enumerate(num_convs_in_dense_blocks): net.add(DenseBlock(num_convs, growth_rate)) # This is the number of output channels in the previous dense block num_channels += num_convs * growth_rate # A transition layer that halves the number of channels is added between # the dense blocks if i != len(num_convs_in_dense_blocks) - 1: num_channels //= 2 net.add(transition_block(num_channels)) ``` Similar to ResNet, a global pooling layer and fully connected layer are connected at the end to produce the output. ``` net.add(nn.BatchNorm(), nn.Activation('relu'), nn.GlobalAvgPool2D(), nn.Dense(10)) ``` ## Data Acquisition and Training Since we are using a deeper network here, in this section, we will reduce the input height and width from 224 to 96 to simplify the computation. ``` lr, num_epochs, batch_size = 0.1, 10, 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=96) d2l.train_ch5(net, train_iter, test_iter, num_epochs, lr) ``` ## Summary * In terms of cross-layer connections, unlike ResNet, where inputs and outputs are added together, DenseNet concatenates inputs and outputs on the channel dimension. * The main units that compose DenseNet are dense blocks and transition layers. 
* We need to keep the dimensionality under control when composing the network by adding transition layers that shrink the number of channels again. ## Exercises 1. Why do we use average pooling rather than max-pooling in the transition layer? 1. One of the advantages mentioned in the DenseNet paper is that its model parameters are smaller than those of ResNet. Why is this the case? 1. One problem for which DenseNet has been criticized is its high memory consumption. * Is this really the case? Try to change the input shape to $224\times 224$ to see the actual (GPU) memory consumption. * Can you think of an alternative means of reducing the memory consumption? How would you need to change the framework? 1. Implement the various DenseNet versions presented in Table 1 of :cite:`Huang.Liu.Van-Der-Maaten.ea.2017`. 1. Why do we not need to concatenate terms if we are just interested in $\mathbf{x}$ and $f(\mathbf{x})$ for ResNet? Why do we need this for more than two layers in DenseNet? 1. Design a DenseNet for fully connected networks and apply it to the Housing Price prediction task. ## Scan the QR Code to [Discuss](https://discuss.mxnet.io/t/2360) ![](../img/qr_densenet.svg)
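One way to sanity-check the channel arithmetic described above (64 channels after the stem, then $64+4\times 32=192$ after the first dense block, halved to 96 by the transition layer, and so on) is to push a dummy batch through `net` block by block and print the output shapes. This is only a sketch; `net.initialize()` is a no-op apart from a warning if the parameters were already initialized by the training cell.

```
net.initialize()
X = np.random.uniform(size=(1, 1, 96, 96))
for layer in net:
    X = layer(X)
    print(layer.name, 'output shape:', X.shape)
```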
## Solutions to auto-diagnostic test 3 Exercises

By L.C. Félix-Herrán, Tec.

## 1) Zero-order-hold sampling with sampling period $h=0.1$ and process model

$$ G(s) = \frac{s(s+0.3)}{(s-0.2)(s+0.7)}. $$

### Solution

#### Step-response of the system

$$ Y(s) = \frac{G(s)}{s} = \frac{s+0.3}{(s-0.2)(s+0.7)} = \frac{0.556}{s-0.2} + \frac{0.444}{s+0.7} $$

$$ y(t) = \mathcal{L}^{-1} \{ Y(s) \} = 0.556\mathrm{e}^{0.2t} + 0.444 \mathrm{e}^{-0.7t}.$$

#### Sample and apply z-transform

$$ Y(z) = 0.556 \mathcal{Z}\{(\mathrm{e}^{0.2h})^k\} + 0.444 \mathcal{Z}\{(\mathrm{e}^{-0.7h})^k\} = \frac{0.556 z}{z - \mathrm{e}^{0.2h}} + \frac{0.444z}{z - \mathrm{e}^{-0.7h}}. $$

#### Divide by z-transform of step input

\begin{align}
H(z) &= \frac{Y(z)}{U(z)} = \frac{z-1}{z} \left(\frac{0.556 z}{z - \mathrm{e}^{0.2h}} + \frac{0.444z}{z - \mathrm{e}^{-0.7h}} \right)\\
&= (z-1) \frac{ 0.556(z - \mathrm{e}^{-0.7h}) + 0.444(z - \mathrm{e}^{0.2h})}{(z - \mathrm{e}^{0.2h})(z - \mathrm{e}^{-0.7h})}\\
&= \frac{\big(z-1\big)\big( z - \big(0.556 \mathrm{e}^{-0.7h} + 0.444 \mathrm{e}^{0.2h}\big)\big)}{(z - \mathrm{e}^{0.2h})(z - \mathrm{e}^{-0.7h})}
\end{align}

## 2) Closed-loop system with plant model

$$G(s) = \frac{27}{s(s+27)}. $$

### a) Do ZOH-sampling

#### Step-response of the system

$$Y(s) = \frac{G(s)}{s} = \frac{27}{s^2(s+27)} = -\frac{1/27}{s} + \frac{1}{s^2} + \frac{1/27}{s+27}. $$

$$ y(t) = \mathcal{L}^{-1} \{ Y(s) \} = t - \frac{1}{27} + \frac{1}{27} \mathrm{e}^{-27t}.$$

#### Sample and apply z-transform

$$ Y(z) = \frac{zh}{(z-1)^2} - \frac{1/27 z}{z-1} + \frac{1/27 z}{ z - \mathrm{e}^{-27h}}. $$

#### Divide by z-transform of step input

\begin{align}
H(z) &= \frac{Y(z)}{U(z)} = \frac{z-1}{z} \left(\frac{zh}{(z-1)^2} - \frac{1/27 z}{z-1} + \frac{1/27 z}{ z - \mathrm{e}^{-27h}} \right)\\
&= \frac{h}{z-1} - \frac{1}{27} + \frac{ 1/27(z-1)}{ z - \mathrm{e}^{-27h}}\\
&= \frac{h(z - \mathrm{e}^{-27h}) - \frac{1}{27}(z-1)(z - \mathrm{e}^{-27h}) + \frac{1}{27}(z-1)^2}{(z-1)(z - \mathrm{e}^{-27h})}\\
&= \frac{h (z - \mathrm{e}^{-27h}) - \frac{1}{27}(z^2 - (1+\mathrm{e}^{-27h})z + \mathrm{e}^{-27h}) + \frac{1}{27}(z^2 - 2z +1)}{(z-1)(z - \mathrm{e}^{-27h})}\\
&= \frac{\big(h - \frac{1}{27}(1-\mathrm{e}^{-27h})\big) z - \big(h\mathrm{e}^{-27h} + \frac{1}{27}\mathrm{e}^{-27h} - \frac{1}{27}\big)}{(z-1)(z - \mathrm{e}^{-27h})}
\end{align}

```
import numpy as np

h = 0.1
e27h = np.exp(-27*h)
b0 = h - 1.0/27*(1 - e27h)
b1 = -(h*e27h + 1.0/27*(e27h - 1))
print("exp(-27h) = %f, b0=%f, b1=%f" %(e27h, b0, b1))
```

Pulse-transfer function for the process:

$$ H(z) = \frac{0.065 z + 0.028}{(z-1)(z-0.067)}. $$

### b) Find difference equation corresponding to plant model

Using the shift operator $\text{q}$ we have

$$ c(k) = H(\text{q}) u(k) = \frac{0.065\text{q} + 0.028}{(\text{q}-1)(\text{q} - 0.067)}u(k) $$

$$ (\text{q}-1)(\text{q} - 0.067) c(k) = (0.065\text{q} + 0.028)u(k)$$

$$ (\text{q}^2 - 1.067\text{q} + 0.067) c(k) = (0.065\text{q} + 0.028)u(k) $$

$$ c(k+2) - 1.067 c(k+1) + 0.067 c(k) = 0.065 u(k+1) + 0.028 u(k) $$

## 3)

Given

$$ G(s) = \frac{3s\mathrm{e}^{-2s}}{s+1}, $$

determine the discretization of the system, assuming that it is preceded by a hold and followed by an ideal sampler, for the following cases:

- **a)** ZOH and sampling period $h=0.5$
- **b)** ZOH and sampling period $h=0.3$
- **c)** FOH and sampling period $h=0.5$

### Solutions

#### b)

The delay $\tau$ in the process is 2, so we have

$$ \tau = 6h + mh$$

with $h=0.3$ and $m = 2/3$.
Let $y_0(t)$ be the step-response of the system without the delay, and $y_1(t)$ be the step-response of the system for a delay of $6h$. We get

\begin{align}
y_0(t) &= \mathcal{L}^{-1} \{ \frac{3s}{s+1} \cdot \frac{1}{s} \} = 3\mathrm{e}^{-t}u_H(t)\\
y_1(t) &= y_0(t-6h)\\
y(t) &= y_1(t-mh) = y_0(t-6h-mh)
\end{align}

Applying the z-transform to the sampled signal $y_1(kh)$ gives

$$ Y_1(z) = z^{-6} Y_0(z) = z^{-6} \frac{3z}{z-\mathrm{e}^{-h}}. $$

In order to obtain the z-transform of the actual step-response $y(t)$ we need to use the modified z-transform (see the textbook Fernández y Ramírez, ch 4.6).

\begin{align}
Y(z, m) &= \sum_{k=0}^\infty y_1(kh+mh)z^{-k} = \sum_{k=6}^\infty 3\mathrm{e}^{-(k-6)h-mh}z^{-k}\\
&= \mathrm{e}^{-mh} z^{-6} \sum_{j=0}^\infty 3\mathrm{e}^{-jh}z^{-j} = \mathrm{e}^{-mh} Y_1(z)\\
&= \mathrm{e}^{-mh} \frac{3}{z^5(z-\mathrm{e}^{-h})}
\end{align}

```
import numpy as np
h = 0.3
m = 2.0/3.0
emh = np.exp(-m*h)
eh = np.exp(-h)
print('e^{-mh}=%s' %(emh))
print('e^{-h}=%s' %(eh))
3*emh
np.exp(-0.33*0.3)
```

With numerical values

$$ Y(z,m=2/3) = 0.82 \frac{3}{z^5(z-0.74)}$$

All that is left is to divide by the z-transform of the step signal

\begin{align}
H(z) &= \frac{Y(z)}{U(z)} = \frac{z-1}{z}\frac{3\cdot 0.82}{z^5(z-0.74)}\\
&= \frac{2.46(z-1)}{z^6(z-0.74)}
\end{align}

```
0.44444444 + 0.555555555555
```
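As a quick cross-check of the hand calculation in exercise 2 (this is an add-on sketch, not part of the original solution, and it assumes `scipy` is available), the same ZOH discretization can be obtained with `scipy.signal.cont2discrete`:

```
import numpy as np
from scipy.signal import cont2discrete

h = 0.1
num, den = [27.0], [1.0, 27.0, 0.0]          # G(s) = 27 / (s^2 + 27 s)
numd, dend, _ = cont2discrete((num, den), h, method='zoh')
print(np.round(numd.ravel(), 4))   # expected roughly [0.      0.0655  0.0278]
print(np.round(dend, 4))           # expected roughly [1.     -1.0672  0.0672]
```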
github_jupyter
import numpy as np h = 0.1 e27h = np.exp(-27*h) b0 = h - 1.0/27*(1 - e27h) b1 = -(h*e27h + 1.0/27*(e27h - 1)) print("exp(-27h) = %f, b0=%f, b1=%f" %(e27h, b0, b1)) import numpy as np h = 0.3 m = 2.0/3.0 emh = np.exp(-m*h) eh = np.exp(-h) print('e^{-mh}=%s' %(emh)) print('e^{-h}=%s' %(eh)) 3*emh np.exp(-0.33*0.3) 0.44444444 + 0.555555555555
0.130618
0.964522
Version 1.0.2

# Pandas basics

Hi! In this programming assignment you need to refresh your `pandas` knowledge. You will need to do several [`groupby`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.groupby.html)s and [`join`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.join.html)s to solve the task.

```
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
%matplotlib inline

from grader import Grader

DATA_FOLDER = '../readonly/final_project_data/'

transactions = pd.read_csv(os.path.join(DATA_FOLDER, 'sales_train.csv.gz'))
items = pd.read_csv(os.path.join(DATA_FOLDER, 'items.csv'))
item_categories = pd.read_csv(os.path.join(DATA_FOLDER, 'item_categories.csv'))
shops = pd.read_csv(os.path.join(DATA_FOLDER, 'shops.csv'))
```

The dataset we are going to use is taken from the competition that serves as the final project for this course. You can find the complete data description at the [competition web page](https://www.kaggle.com/c/competitive-data-science-final-project/data). To join the competition use [this link](https://www.kaggle.com/t/1ea93815dca248e99221df42ebde3540).

## Grading

We will create a grader instance below and use it to collect your answers. When the function `submit_tag` is called, the grader will store your answer *locally*. The answers will *not* be submitted to the platform immediately, so you can call the `submit_tag` function as many times as you need.

When you are ready to push your answers to the platform you should fill in your credentials and run the `submit` function in the <a href="#Authorization-&-Submission">last paragraph</a> of the assignment.

```
grader = Grader()
```

# Task

Let's start with a simple task.

<ol start="0">
  <li><b>Print the shape of the loaded dataframes and use the [`df.head`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.head.html) function to print several rows. Examine the features you are given.</b></li>
</ol>

```
transactions.head()
transactions.shape
items.head()
items.shape
item_categories.head()
item_categories.shape
shops.head()
shops.shape
```

Now use your `pandas` skills to get answers for the following questions. The first question is:

1. **What was the maximum total revenue among all the shops in September, 2014?**

* Hereinafter *revenue* refers to total sales minus value of goods returned.

*Hints:*

* Sometimes items are returned, find such examples in the dataset.
* It is handy to split the `date` field into [`day`, `month`, `year`] components and use `df.year == 14` and `df.month == 9` in order to select the target subset of dates.
* You may work with the `date` feature as with strings, or you may first convert it to `pd.datetime` type with the `pd.to_datetime` function, but do not forget to set the correct `format` argument.

```
transactions_new = transactions.copy()
transactions_new['revenue'] = transactions_new['item_price'] * transactions_new['item_cnt_day']
transactions_new['date'] = pd.to_datetime(transactions_new['date'], format='%d.%m.%Y')
transactions_new['day'] = transactions_new['date'].apply(lambda x: x.day)
transactions_new['month'] = transactions_new['date'].apply(lambda x: x.month)
transactions_new['year'] = transactions_new['date'].apply(lambda x: x.year)
grouped_shop = transactions_new.where((transactions_new.year == 2014) & (transactions_new.month == 9)).groupby('shop_id')['revenue'].sum()
grouped_shop.max()
max_revenue = grouped_shop.max()
grader.submit_tag('max_revenue', max_revenue)
```

Great!
Let's move on and answer another question: <ol start="2"> <li><b>What item category generated the highest revenue in summer 2014?</b></li> </ol> * Submit `id` of the category found. * Here we call "summer" the period from June to August. *Hints:* * Note, that for an object `x` of type `pd.Series`: `x.argmax()` returns **index** of the maximum element. `pd.Series` can have non-trivial index (not `[1, 2, 3, ... ]`). ``` data = transactions_new.join(items, on='item_id', how='left', lsuffix='_l').drop('item_id_l', axis=1) data.head() max_elem = data.where((data.month.isin(np.arange(6,9))) & (data.year == 2014)).groupby('item_category_id')['revenue'].sum().argmax() category_id_with_max_revenue = int(max_elem) grader.submit_tag('category_id_with_max_revenue', category_id_with_max_revenue) ``` <ol start="3"> <li><b>How many items are there, such that their price stays constant (to the best of our knowledge) during the whole period of time?</b></li> </ol> * Let's assume, that the items are returned for the same price as they had been sold. ``` num_items_constant_price = sum([1 if item==1 else 0 for item in transactions.groupby(['item_id'])['item_price'].nunique()]) num_items_constant_price = num_items_constant_price grader.submit_tag('num_items_constant_price', num_items_constant_price) ``` Remember, the data can sometimes be noisy. <ol start="4"> <li><b>What was the variance of the number of sold items per day sequence for the shop with `shop_id = 25` in December, 2014? Do not count the items, that were sold but returned back later.</b></li> </ol> * Fill `total_num_items_sold` and `days` arrays, and plot the sequence with the code below. * Then compute variance. Remember, there can be differences in how you normalize variance (biased or unbiased estimate, see [link](https://math.stackexchange.com/questions/496627/the-difference-between-unbiased-biased-estimator-variance)). Compute ***unbiased*** estimate (use the right value for `ddof` argument in `pd.var` or `np.var`). * If there were no sales at a given day, ***do not*** impute missing value with zero, just ignore that day ``` shop_id = 25 total_num_items_sold = transactions_new.where((transactions_new.year == 2014) & (transactions_new.month == 12) & (transactions_new.shop_id == 25)).groupby(['day'])['item_cnt_day'].sum() days = [day[0] for day in transactions_new.where((transactions_new.month==12) & (transactions_new.year==2014) & (transactions_new.shop_id==25)).groupby(['day'])] # Plot it plt.plot(days, total_num_items_sold) plt.ylabel('Num items') plt.xlabel('Day') plt.title("Daily revenue for shop_id = 25") plt.show() total_num_items_sold_var = np.var(total_num_items_sold, ddof=1) grader.submit_tag('total_num_items_sold_var', total_num_items_sold_var) ``` ## Authorization & Submission To submit assignment to Cousera platform, please, enter your e-mail and token into the variables below. You can generate token on the programming assignment page. *Note:* Token expires 30 minutes after generation. ``` STUDENT_EMAIL = '[email protected]' STUDENT_TOKEN = 'ytkUXGvkt0eSUNmx' grader.status() grader.submit(STUDENT_EMAIL, STUDENT_TOKEN) ``` Well done! :)
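A tiny, self-contained illustration of the "group, aggregate, pick the maximum" pattern used throughout this assignment (the toy numbers below are made up for the example; also note that in recent `pandas` versions `Series.idxmax()` returns the *label* of the maximum, while `Series.argmax()` returns its *position*):

```
import pandas as pd

toy = pd.DataFrame({'item_category_id': [1, 1, 2, 2, 2],
                    'revenue':          [10.0, 5.0, 3.0, 4.0, 2.0]})
per_category = toy.groupby('item_category_id')['revenue'].sum()
print(per_category.idxmax())   # label of the best category -> 1
print(per_category.argmax())   # position of the maximum    -> 0
```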
github_jupyter
import pandas as pd import numpy as np import os import matplotlib.pyplot as plt %matplotlib inline from grader import Grader DATA_FOLDER = '../readonly/final_project_data/' transactions = pd.read_csv(os.path.join(DATA_FOLDER, 'sales_train.csv.gz')) items = pd.read_csv(os.path.join(DATA_FOLDER, 'items.csv')) item_categories = pd.read_csv(os.path.join(DATA_FOLDER, 'item_categories.csv')) shops = pd.read_csv(os.path.join(DATA_FOLDER, 'shops.csv')) grader = Grader() transactions.head() transactions.shape items.head() items.shape item_categories.head() item_categories.shape shops.head() shops.shape transactions_new = transactions.copy() transactions_new['revenue'] = transactions_new['item_price'] * transactions_new['item_cnt_day'] transactions_new['date'] = pd.to_datetime(transactions_new['date'], format='%d.%m.%Y') transactions_new['day'] = transactions_new['date'].apply(lambda x: x.day) transactions_new['month'] = transactions_new['date'].apply(lambda x: x.month) transactions_new['year'] = transactions_new['date'].apply(lambda x: x.year) grouped_shop = transactions_new.where((transactions_new.year == 2014) & (transactions_new.month == 9)).groupby('shop_id')['revenue'].sum() grouped_shop.max() max_revenue = grouped_shop.max() grader.submit_tag('max_revenue', max_revenue) data = transactions_new.join(items, on='item_id', how='left', lsuffix='_l').drop('item_id_l', axis=1) data.head() max_elem = data.where((data.month.isin(np.arange(6,9))) & (data.year == 2014)).groupby('item_category_id')['revenue'].sum().argmax() category_id_with_max_revenue = int(max_elem) grader.submit_tag('category_id_with_max_revenue', category_id_with_max_revenue) num_items_constant_price = sum([1 if item==1 else 0 for item in transactions.groupby(['item_id'])['item_price'].nunique()]) num_items_constant_price = num_items_constant_price grader.submit_tag('num_items_constant_price', num_items_constant_price) shop_id = 25 total_num_items_sold = transactions_new.where((transactions_new.year == 2014) & (transactions_new.month == 12) & (transactions_new.shop_id == 25)).groupby(['day'])['item_cnt_day'].sum() days = [day[0] for day in transactions_new.where((transactions_new.month==12) & (transactions_new.year==2014) & (transactions_new.shop_id==25)).groupby(['day'])] # Plot it plt.plot(days, total_num_items_sold) plt.ylabel('Num items') plt.xlabel('Day') plt.title("Daily revenue for shop_id = 25") plt.show() total_num_items_sold_var = np.var(total_num_items_sold, ddof=1) grader.submit_tag('total_num_items_sold_var', total_num_items_sold_var) STUDENT_EMAIL = '[email protected]' STUDENT_TOKEN = 'ytkUXGvkt0eSUNmx' grader.status() grader.submit(STUDENT_EMAIL, STUDENT_TOKEN)
0.350866
0.899828
# Understanding the performance of the vecadd kernel _Vasileios Karakasis, CSCS_ In this section we will see how we can avoid the JIT cost of Numba, how we can measure the performance of the kernel without the `%timeit` magic, how we can use `nvprof`, the CUDA profiler to analyze the performance of the kernel, and finally, we will evaluate the performance of the kernel. ## Avoiding the JIT cost The previous exercise has shown that Numba will compile the CUDA kernel every time we call our program and, in order to amortize the compilation cost, we need several invocations. We would like to avoid this cost. Unlike the `@numba.jit` decorator, `@cuda.jit` does not accept a `cache` parameter, that would cache the generated code on the disk and use it on subsequent invocations of the program. Nonetheless, we can force the code generation at import time by supplying a function signature to the `@cuda.jit` decorator that describes the CUDA kernel. This will generate the CUDA code at the time when the decorator processes the function declaration and, therefore, we will avoid the runtime cost of JIT. Let's see how this is done: ``` import numba.cuda as cuda import numpy as np @cuda.jit('void(Array(float64, 1, "C"), Array(float64, 1, "C"), Array(float64, 1, "C"))') def _vecadd_cuda(z, x, y): '''The CUDA kernel''' i = cuda.grid(1) N = x.shape[0] if i >= N: return z[i] = x[i] + y[i] ``` This instructs the Numba runtime to compile the following function into a CUDA kernel (return type `void`) accepting three one-dimensional arrays of `float64` (or `double`) stored in row-major order (C convention). This way, Numba does not have to wait until the `_vecadd_cuda` function is called to figure out the argument types and compile the kernel. It can do this at import time, when it first encounters the function. The downside to that is that you can't call the function with a different type of arguments later. For more details on how you can specify function signatures in Numba, see [here](http://numba.pydata.org/numba-doc/latest/reference/types.html#numba-types). Let's retry our example now with this version of the kernel. ``` # Set up the host vectors N = 1000*1000*100 x = np.random.rand(N) y = np.random.rand(N) # Copy and allocate data on the device d_x = cuda.to_device(x) d_y = cuda.to_device(y) d_z = cuda.device_array_like(x) # Set up the kernel invocation block_size = 128 num_blocks = N // block_size if N % block_size: num_blocks += 1 # Call the kernel %timeit -n1 -r1 _vecadd_cuda[num_blocks, block_size](d_z, d_x, d_y) # Copy back the result to the host res = d_z.copy_to_host() # Validate the result assert np.allclose(x + y, res) ``` ### Exercise > Time the kernel with `%timeit -n1 -r1`. Try to increase the repetitions and experiment with different array sizes. What do you see? ## Measuring the execution time of the kernel All you see from the previous exercise is the same execution time! What is happening? Actually, you are not measuring **the kernel execution time**, but rather the **kernel launch time**. CUDA kernels are launched **asynchronously**. This means that, as soon as you launch the kernel on the GPU, the CPU will continue execution. In this case, it will continue executing and it will block at the statement that copies back the result to the host. How do we measure the kernel execution time then? 
For this, we are going to write a Python [context manager](https://docs.python.org/3.8/reference/datamodel.html?highlight=__getitem__#with-statement-context-managers) so as to measure the execution time of a region in a nice, Pythonic way: ``` import time class time_region: def __init__(self, time_offset=0): self._time_off = time_offset def __enter__(self): self._t_start = time.time() return self def __exit__(self, exc_type, exc_value, traceback): self._t_end = time.time() def elapsed_time(self): return self._time_off + (self._t_end - self._t_start) ``` For more details about context managers, please refer elsewhere. Let's use our timer to time the kernel: ``` # Set up the host vectors N = 200*1000*1000 x = np.random.rand(N) y = np.random.rand(N) # Copy and allocate data on the device d_x = cuda.to_device(x) d_y = cuda.to_device(y) d_z = cuda.device_array_like(x) # Set up the kernel invocation block_size = 128 num_blocks = N // block_size if N % block_size: num_blocks += 1 # Call the kernel with time_region() as t_kernel: _vecadd_cuda[num_blocks, block_size](d_z, d_x, d_y) print(f'CUDA kernel time: {t_kernel.elapsed_time()} s') # Copy back the result to the host res = d_z.copy_to_host() # Validate the result assert np.allclose(x + y, res) ``` Our timer seems to work fine; we still measure the kernel launch time as with `%timeit`. In order to measure the actual kernel execution time, we need to block the CPU calling thread until the kernel finishes, immediately after we launch the kernel. We can achieve that with `cuda.synchronize()`: ``` # Set up the host vectors N = 200*1000*1000 x = np.random.rand(N) y = np.random.rand(N) # Copy and allocate data on the device d_x = cuda.to_device(x) d_y = cuda.to_device(y) d_z = cuda.device_array_like(x) # Set up the kernel invocation block_size = 128 num_blocks = N // block_size if N % block_size: num_blocks += 1 # Call the kernel with time_region() as t_kernel: _vecadd_cuda[num_blocks, block_size](d_z, d_x, d_y) cuda.synchronize() with time_region() as t_ref: z = x + y print(f'CUDA kernel time: {t_kernel.elapsed_time()} s') print(f'Numpy time: {t_ref.elapsed_time()} s') # Copy back the result to the host res = d_z.copy_to_host() # Validate the result assert np.allclose(x + y, res) ``` Not bad, the CUDA kernel is 10x faster than the native Numpy kernel. Before analysing how good or bad this is, let's see an alternative way for measuring the kernel time that actually avoids the use of `cuda.synchronize()`. ## Measuring the kernel execution time with CUDA events Inserting `cuda.synchronize()` without a reason could slow down your application, since it not only blocks the current CPU thread, but also imposes a synchronization point for all the CUDA streams on the GPU that are currently running in parallel. > A CUDA stream is essentially a series of sequential operations (data transfers, kernel launches, etc.) that execute on the GPU. Multiple CUDA streams may run independently on the GPU, thus allowing overlapping of operations, such as data transfers and execution of kernels. To avoid this, but also to obtain a more precise measurement, you can use [CUDA events](http://docs.nvidia.com/cuda/cuda-c-programming-guide/#events). You can imagine CUDA events as milestones associated with timestamps that you can insert between operations in a CUDA stream. 
Let's see how we can adapt our `time_region` context manager to use CUDA events:

```
class time_region_cuda:
    def __init__(self, time_offset=0, cuda_stream=0):
        self._t_start = cuda.event(timing=True)
        self._t_end = cuda.event(timing=True)
        self._time_off = time_offset
        self._cuda_stream = cuda_stream

    def __enter__(self):
        self._t_start.record(self._cuda_stream)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self._t_end.record(self._cuda_stream)
        self._t_end.synchronize()

    def elapsed_time(self):
        return self._time_off + 1.e-3*cuda.event_elapsed_time(self._t_start, self._t_end)
```

To measure a data region with CUDA events you first need to create two events: one for the start and one for the end. You can achieve that with `cuda.event(timing=True)`. To start counting, you need to call `record()` on the starting event, marking the "arrival" to that milestone. Similarly, you call `record()` on the ending event to mark the end of the region. Then you can obtain the elapsed time using the corresponding function as shown in the example above.

Let's rewrite our vector addition example using the CUDA event timers:

```
# Set up the host vectors
N = 200*1000*1000
x = np.random.rand(N)
y = np.random.rand(N)

# Copy and allocate data on the device
d_x = cuda.to_device(x)
d_y = cuda.to_device(y)
d_z = cuda.device_array_like(x)

# Set up the kernel invocation
block_size = 128
num_blocks = N // block_size
if N % block_size:
    num_blocks += 1

# Call the kernel
with time_region_cuda() as t_kernel:
    _vecadd_cuda[num_blocks, block_size](d_z, d_x, d_y)

with time_region() as t_ref:
    z = x + y

print(f'CUDA kernel time: {t_kernel.elapsed_time()} s')
print(f'Numpy time: {t_ref.elapsed_time()} s')

# Copy back the result to the host
res = d_z.copy_to_host()

# Validate the result
assert np.allclose(x + y, res)
```

As we can see, the execution time obtained is the correct one without having to use `cuda.synchronize()`.

## Assessing the performance of the kernel

The question that arises is: how good is the performance that we achieve? Let's inspect the kernel further. Each thread does two `float64` reads from memory and one write, and performs one addition. That means that for every floating-point operation, the kernel must transfer 24 bytes to/from the main memory. This gives us an *arithmetic intensity* or *flop:byte ratio* of 0.0417. The lower this ratio is for a computational kernel, the more likely it is that the kernel is memory bandwidth bound. As the ratio increases, the kernel tends to be more compute bound. The theory behind the arithmetic intensity is covered by the *Roofline* performance model, which is outside the scope of this tutorial.

For the moment, let's compute two performance metrics, the `Gflop/s` achieved by the kernel and the data transfer rate to/from memory:

```
N
print(f'Performance: {1.e-9*N/t_kernel.elapsed_time()} Gflop/s')
print(f'Transfer rate: {1.e-9*3*N*8/t_kernel.elapsed_time()} GB/s')
```

Let's compute the same for the NumPy kernel:

```
print(f'Performance: {1.e-9*N/t_ref.elapsed_time()} Gflop/s')
print(f'Transfer rate: {1.e-9*3*N*8/t_ref.elapsed_time()} GB/s')
```

As you can see, the GPU can deliver more than 10x the bandwidth of the CPU. Checking the [datasheet](https://www.nvidia.com/content/dam/en-zz/Solutions/Data-Center/tesla-p100/pdf/nvidia-tesla-p100-datasheet.pdf) of the NVIDIA P100 GPU, we can see that the peak nominal memory bandwidth is 732 GB/s, meaning that our kernel utilizes 70% of the peak bandwidth.
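To make the "memory bandwidth bound" argument concrete, here is a small roofline-style estimate (an illustrative sketch only; the peak floating-point figure below is an approximate FP64 number for the P100 and should be treated as an assumption):

```
ai = 1.0/24.0        # flop:byte ratio of the vector addition kernel
peak_bw = 732e9      # B/s, nominal P100 memory bandwidth from the datasheet above
peak_flops = 4.7e12  # flop/s, approximate P100 FP64 peak (assumed)
attainable = min(peak_flops, ai*peak_bw)
print(f'Roofline bound for vecadd: {1.e-9*attainable} Gflop/s')
```

Since `ai*peak_bw` is far below `peak_flops`, the attainable performance (roughly 30 Gflop/s) is dictated entirely by the memory bandwidth, which matches the bandwidth-bound behaviour measured above.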
> Achieving the nominal peak memory bandwidth is usually not possible with real-life computational kernels, even with very low arithmetic intensity. For this reason, we tend to cite the *effective memory bandwidth*, which is obtained by benchmarks like the one presented in this tutorial. In fact, the effective memory bandwidth of the P100 GPUs is at ~550 GB/s, which essentially shows that the vector addition kernel's performance is optimal. For the Haswell CPUs on the host, the effective memory bandwidth is ~50 GB/s.

## Understanding the data transfer overhead

So far we have only focused on the performance of the kernel, but there is still quite an important topic we have not yet addressed. CUDA kernels require that the data they operate on is located on the device, and we need to move that data there from the host. What is the cost of this data movement? Let's time our benchmark code:

```
# Set up the host vectors
N = 200*1000*1000
x = np.random.rand(N)
y = np.random.rand(N)

# Copy and allocate data on the device
with time_region_cuda() as t_copyin:
    d_x = cuda.to_device(x)
    d_y = cuda.to_device(y)

with time_region_cuda() as t_create:
    d_z = cuda.device_array_like(x)

# Set up the kernel invocation
block_size = 128
num_blocks = N // block_size
if N % block_size:
    num_blocks += 1

# Call the kernel
with time_region_cuda() as t_kernel:
    _vecadd_cuda[num_blocks, block_size](d_z, d_x, d_y)

with time_region() as t_ref:
    z = x + y

print(f'CUDA kernel time: {t_kernel.elapsed_time()} s')
print(f'Numpy time: {t_ref.elapsed_time()} s')

# Copy back the result to the host
with time_region_cuda() as t_copyout:
    res = d_z.copy_to_host()

print(f'Copyin time: {t_copyin.elapsed_time()}')
print(f'Create time: {t_create.elapsed_time()}')
print(f'Copyout time: {t_copyout.elapsed_time()}')

# Validate the result
assert np.allclose(x + y, res)
```

The data copy times are quite important! In fact, if we include these in the total execution time of the GPU version, the CPU version becomes more than 8x faster! Minimizing data transfers is the No. 1 optimization that you should do when programming for GPUs. You must minimize the data transfers to/from the GPU by keeping the necessary data on the GPU for as long as it is needed.

Before closing this discussion, let's see how fast the data is moved over to the GPU:

```
print(f'Copyin rate: {1e-9*2*N*8/t_copyin.elapsed_time()} GB/s')
```

This is bound by the data rate of the 16x PCIe bus that the GPU is attached to, and it is indeed much slower than the main memory bandwidth of modern processors. Interestingly, the copyout data rate seems to be much slower still:

```
print(f'Copyout rate: {1e-9*N*8/t_copyout.elapsed_time()} GB/s')
```

By default, memory allocated on the host is pageable. This means that it can be moved by the OS kernel to a secondary storage device if there is not enough memory available on the system. This can incur a significant performance penalty, especially if you write to freshly allocated memory (as happens in our example). You can avoid this overhead by using *page-locked* or *pinned* memory. This memory cannot be paged out and it is physically resident on the memory device. CUDA gives you the opportunity to use pinned memory and Numba allows you to create pinned ndarrays using the [cuda.pinned_array()](http://numba.pydata.org/numba-doc/latest/cuda-reference/memory.html#numba.cuda.pinned_array) function.
> In order to keep track of which memory pages are resident on the physical memory and which are not, the OS kernel maintains a special data structure called *page table*. When you allocate memory on the host, the OS kernel simply creates a virtual memory mapping and it does not allocate any physical page. As soon as you start writing to the memory area you have been allocated, it will look for the page in its page tables and if not found, a *page fault* will be raised and then the kernel will have to physically allocate the missing memory page and update its page tables. Let's rewrite the copyout part using pinned memory: ``` res = cuda.pinned_array(N) with time_region_cuda() as t_pinned: d_z.copy_to_host(res) assert np.allclose(x + y, res) print(f'Copyout data rate (pinned): {1e-9*N*8/t_pinned.elapsed_time()} GB/s') ``` Notice how much the performance has improved. It is now even better than the copyin operation. However, pinned memory does not come without a cost. Since pinned pages cannot be paged out, they will stay on the physical memory, increasing the memory pressure and, finally, the effective memory consumption of the code. For memory hungry applications, this can be a problem. ### Exercise > Apply the pinned memory technique also to the input arrays `x` and `y`. ## Profiling the CUDA code In this simple example of vector addition we assessed the performance and identified the bottlenecks ourselves, by analyzing the code structure and reasoning about it. In more complex codes or codes that you are not very familiar with, it would be good if this analysis could be done by a dedicated tool. Not to be misunderstood, understanding the code structure and its memory and compute requirements is essential for optimizing it in any case, but using a *performance profiler* is very handy for analyzing the performance bottlenecks, for helping you prioritizing your optimization targets and for understanding how much room for improvement exists. > Remember to load the `cudatoolkit` module to make `nvprof` available! NVIDIA provides [Nsight](https://developer.nvidia.com/nsight-visual-studio-edition) and `nvprof` for profiling CUDA code. In this tutorial, we are going to use `nvprof` and the [Nvidia Visual Profiler](https://developer.nvidia.com/nvidia-visual-profiler) to inspect the results. > You may install the Nvidia Visual Profiler on your personal computer and visualize the performance results, even if you don't have a GPU. The `src/vecadd.py` file contains the vector addition example as we have finally presented it here. Let's do a basic profing: ```bash srun -C gpu nvprof -o vecadd.nvprof python src/vecadd.py $((200*1000*1000)) ``` A few notes: 1. To run on a node, drop `srun -C gpu` 2. Must use the correct Python interpreter, namely the `miniconda-pythonhpc` which contains all the packages needed 3. To do so: `export PYTHONPATH=""; export PATH=<path-to-python>:${PATH}` > If you want to profile your code with `nvprof` you should call `cuda.profile_stop()` at the end of your program. This profiling provides basic information about the data transfers and the execution time of the different kernels. It is the first step you need to take, because this will show you how much time you spend on transferring data to/from the device and which kernels are the most time consuming. 
Here is a screenshot for our example: ![Profiling of the vector addition benchmark](figs/vecadd-nvprof.png) Notice how much is the overhead of the data transfers as well as that of the pinned memory allocation of the `res` array. Placing your cursor on top of any of the regions in the timeline you can see more information. In this case, I have highlighted the copy-to-host operation, where you can see that the target memory on the host is pinned and the data rate is exactly as the one we have calculated above. As soon as you have addressed the data transfer issues, the next step is to identify the performance bottlenecks in the most time consuming kernels. To do that, you need more detailed information that reflects hardware events happening on the GPU (e.g., instructions executed, data transferred from the main memory, use of caches etc.). To achieve this you have to pass the `--analysis-metrics` option to `nvprof` as follows: ```bash srun -C gpu nvprof -o vecadd.detailed.nvprof --analysis-metrics python src/vecadd.py $((200*1000*1000)) ``` As you will notice this command incurs quite of an overhead. The reason behind that is that there are only a few hardware performance monitoring registers on the GPU, so in order for `nvprof` to collect all the necessary performance metrics, it will have to rerun the kernel several times. From this type of analysis you can obtain the actual memory bandwidth consumed by your kernel and this is what shows up for our vector addition kernel: ![Detailed profiling of the vector addition benchmark](figs/vecadd-detailed-nvprof.png) As expected the memory bandwidth consumption is red highlighted since it is the performance limiting factor, as we have also calculated manually. The bandwidth consumption reported here is 557 GB/s, which is quite close with our measurements based on the algorithm details. This concludes our discussion on measuring and analyzing the performance of a CUDA program written with Numba. ``` ! which python ```
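For reference, one possible sketch of the earlier pinned-memory exercise (allocating the *input* arrays with `cuda.pinned_array` as well) is shown below. This is an illustrative variant, not the notebook's reference solution, and it reuses the `time_region_cuda` helper defined above:

```
N = 200*1000*1000
x = cuda.pinned_array(N, dtype=np.float64)
y = cuda.pinned_array(N, dtype=np.float64)
x[:] = np.random.rand(N)
y[:] = np.random.rand(N)

with time_region_cuda() as t_copyin:
    d_x = cuda.to_device(x)
    d_y = cuda.to_device(y)

print(f'Copyin rate (pinned): {1e-9*2*N*8/t_copyin.elapsed_time()} GB/s')
```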
github_jupyter
import numba.cuda as cuda import numpy as np @cuda.jit('void(Array(float64, 1, "C"), Array(float64, 1, "C"), Array(float64, 1, "C"))') def _vecadd_cuda(z, x, y): '''The CUDA kernel''' i = cuda.grid(1) N = x.shape[0] if i >= N: return z[i] = x[i] + y[i] # Set up the host vectors N = 1000*1000*100 x = np.random.rand(N) y = np.random.rand(N) # Copy and allocate data on the device d_x = cuda.to_device(x) d_y = cuda.to_device(y) d_z = cuda.device_array_like(x) # Set up the kernel invocation block_size = 128 num_blocks = N // block_size if N % block_size: num_blocks += 1 # Call the kernel %timeit -n1 -r1 _vecadd_cuda[num_blocks, block_size](d_z, d_x, d_y) # Copy back the result to the host res = d_z.copy_to_host() # Validate the result assert np.allclose(x + y, res) import time class time_region: def __init__(self, time_offset=0): self._time_off = time_offset def __enter__(self): self._t_start = time.time() return self def __exit__(self, exc_type, exc_value, traceback): self._t_end = time.time() def elapsed_time(self): return self._time_off + (self._t_end - self._t_start) # Set up the host vectors N = 200*1000*1000 x = np.random.rand(N) y = np.random.rand(N) # Copy and allocate data on the device d_x = cuda.to_device(x) d_y = cuda.to_device(y) d_z = cuda.device_array_like(x) # Set up the kernel invocation block_size = 128 num_blocks = N // block_size if N % block_size: num_blocks += 1 # Call the kernel with time_region() as t_kernel: _vecadd_cuda[num_blocks, block_size](d_z, d_x, d_y) print(f'CUDA kernel time: {t_kernel.elapsed_time()} s') # Copy back the result to the host res = d_z.copy_to_host() # Validate the result assert np.allclose(x + y, res) # Set up the host vectors N = 200*1000*1000 x = np.random.rand(N) y = np.random.rand(N) # Copy and allocate data on the device d_x = cuda.to_device(x) d_y = cuda.to_device(y) d_z = cuda.device_array_like(x) # Set up the kernel invocation block_size = 128 num_blocks = N // block_size if N % block_size: num_blocks += 1 # Call the kernel with time_region() as t_kernel: _vecadd_cuda[num_blocks, block_size](d_z, d_x, d_y) cuda.synchronize() with time_region() as t_ref: z = x + y print(f'CUDA kernel time: {t_kernel.elapsed_time()} s') print(f'Numpy time: {t_ref.elapsed_time()} s') # Copy back the result to the host res = d_z.copy_to_host() # Validate the result assert np.allclose(x + y, res) class time_region_cuda: def __init__(self, time_offset=0, cuda_stream=0): self._t_start = cuda.event(timing=True) self._t_end = cuda.event(timing=True) self._time_off = time_offset self._cuda_stream = cuda_stream def __enter__(self): self._t_start.record(self._cuda_stream) return self def __exit__(self, exc_type, exc_value, traceback): self._t_end.record(self._cuda_stream) self._t_end.synchronize() def elapsed_time(self): return self._time_off + 1.e-3*cuda.event_elapsed_time(self._t_start, self._t_end) # Set up the host vectors N = 200*1000*1000 x = np.random.rand(N) y = np.random.rand(N) # Copy and allocate data on the device d_x = cuda.to_device(x) d_y = cuda.to_device(y) d_z = cuda.device_array_like(x) # Set up the kernel invocation block_size = 128 num_blocks = N // block_size if N % block_size: num_blocks += 1 # Call the kernel with time_region_cuda() as t_kernel: _vecadd_cuda[num_blocks, block_size](d_z, d_x, d_y) with time_region() as t_ref: z = x + y print(f'CUDA kernel time: {t_kernel.elapsed_time()} s') print(f'Numpy time: {t_ref.elapsed_time()} s') # Copy back the result to the host res = d_z.copy_to_host() # Validate the result assert np.allclose(x + y, 
res) N print(f'Performance: {1.e-9*N/t_kernel.elapsed_time()} Gflop/s') print(f'Transfer rate: {1.e-9*3*N*8/t_kernel.elapsed_time()} GB/s') print(f'Performance: {1.e-9*N/t_ref.elapsed_time()} Gflop/s') print(f'Transfer rate: {1.e-9*3*N*8/t_ref.elapsed_time()} GB/s') # Set up the host vectors N = 200*1000*1000 x = np.random.rand(N) y = np.random.rand(N) # Copy and allocate data on the device with time_region_cuda() as t_copyin: d_x = cuda.to_device(x) d_y = cuda.to_device(y) with time_region_cuda() as t_create: d_z = cuda.device_array_like(x) # Set up the kernel invocation block_size = 128 num_blocks = N // block_size if N % block_size: num_blocks += 1 # Call the kernel with time_region_cuda() as t_kernel: _vecadd_cuda[num_blocks, block_size](d_z, d_x, d_y) with time_region() as t_ref: z = x + y print(f'CUDA kernel time: {t_kernel.elapsed_time()} s') print(f'Numpy time: {t_ref.elapsed_time()} s') # Copy back the result to the host with time_region_cuda() as t_copyout: res = d_z.copy_to_host() print(f'Copyin time: {t_copyin.elapsed_time()}') print(f'Create time: {t_create.elapsed_time()}') print(f'Copyout time: {t_copyout.elapsed_time()}') # Validate the result assert np.allclose(x + y, res) print(f'Copyin rate: {1e-9*2*N*8/t_copyin.elapsed_time()} GB/s') print(f'Copyout rate: {1e-9*N*8/t_copyout.elapsed_time()} GB/s') res = cuda.pinned_array(N) with time_region_cuda() as t_pinned: d_z.copy_to_host(res) assert np.allclose(x + y, res) print(f'Copyout data rate (pinned): {1e-9*N*8/t_pinned.elapsed_time()} GB/s') srun -C gpu nvprof -o vecadd.nvprof python src/vecadd.py $((200*1000*1000)) srun -C gpu nvprof -o vecadd.detailed.nvprof --analysis-metrics python src/vecadd.py $((200*1000*1000)) ! which python
0.608594
0.986979
``` # Dependencies from splinter import Browser from bs4 import BeautifulSoup import requests from webdriver_manager.chrome import ChromeDriverManager import pandas as pd ``` ## NASA Mars News ``` # Referenced Day 2 Ins-Splinter # Setup splinter executable_path = {'executable_path': ChromeDriverManager().install()} browser = Browser('chrome', **executable_path, headless=False) # Visit Nasa news url url = 'https://redplanetscience.com/' browser.visit(url) html = browser.html soup = BeautifulSoup(html, 'html.parser') news_title = soup.find_all('div', class_='content_title') news_p = soup.find_all('div', class_='article_teaser_body') # Print out latest news title and paragraph text for title in news_title: print(f"Title: ") print('---------') print(title.text) print(f"Paragraph: ") print('---------') for parag in news_p: print(parag.text) # Number of titles len(news_title) browser.quit() ``` ## JPL Mars Space Images - Featured Image ``` # Setup splinter executable_path = {'executable_path': ChromeDriverManager().install()} browser = Browser('chrome', **executable_path, headless=False) url = 'https://spaceimages-mars.com/' browser.visit(url) html = browser.html soup = BeautifulSoup(html, 'html.parser') # Referenced: https://www.geeksforgeeks.org/image-scraping-with-python/ for item in soup.find_all('img', class_='thumbimg'): print(url + item['src']) browser.quit() ``` ## Mars Facts ``` url = 'https://galaxyfacts-mars.com/' # Use Pandas to scrape tabular data from url tables = pd.read_html(url) type(tables) tables[0] #Display df # Referenced: https://stackoverflow.com/questions/61736164/how-can-i-set-second-row-as-a-name-of-columns-in-dataframe mars_diagram_df = tables[0] mars_diagram_df.columns = mars_diagram_df.iloc[0] mars_diagram_df = mars_diagram_df.iloc[1:].reset_index(drop=True) mars_diagram_df.set_index("Mars - Earth Comparison", inplace=True) mars_diagram_df tables[1] #Display df mars_planet_profile_df = tables[1] # Rename df/set_index mars_planet_profile_df.columns = ["Description" , "Information"] mars_planet_profile_df.set_index("Description", inplace=True) mars_planet_profile_df # Setup splinter executable_path = {'executable_path': ChromeDriverManager().install()} browser = Browser('chrome', **executable_path, headless=False) ``` ## Mars Hemispheres ``` # Setup splinter executable_path = {'executable_path': ChromeDriverManager().install()} browser = Browser('chrome', **executable_path, headless=False) url = 'https://marshemispheres.com/' browser.visit(url) html = browser.html soup = BeautifulSoup(html, 'html.parser') for item in soup.find_all('img', class_='thumb'): print(url + item['src']) # img_url # title ```
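The last two comments (`# img_url`, `# title`) suggest collecting each hemisphere's title together with its image URL. A hypothetical sketch is shown below; the `div.item` and `h3` selectors are assumptions about the page layout and were not verified in this notebook:

```
# Hypothetical: pair each hemisphere title with its thumbnail URL
hemisphere_image_urls = []
for item in soup.find_all('div', class_='item'):
    heading = item.find('h3')
    img = item.find('img', class_='thumb')
    if heading and img:
        hemisphere_image_urls.append({'title': heading.text, 'img_url': url + img['src']})
print(hemisphere_image_urls)
```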
github_jupyter
# Dependencies from splinter import Browser from bs4 import BeautifulSoup import requests from webdriver_manager.chrome import ChromeDriverManager import pandas as pd # Referenced Day 2 Ins-Splinter # Setup splinter executable_path = {'executable_path': ChromeDriverManager().install()} browser = Browser('chrome', **executable_path, headless=False) # Visit Nasa news url url = 'https://redplanetscience.com/' browser.visit(url) html = browser.html soup = BeautifulSoup(html, 'html.parser') news_title = soup.find_all('div', class_='content_title') news_p = soup.find_all('div', class_='article_teaser_body') # Print out latest news title and paragraph text for title in news_title: print(f"Title: ") print('---------') print(title.text) print(f"Paragraph: ") print('---------') for parag in news_p: print(parag.text) # Number of titles len(news_title) browser.quit() # Setup splinter executable_path = {'executable_path': ChromeDriverManager().install()} browser = Browser('chrome', **executable_path, headless=False) url = 'https://spaceimages-mars.com/' browser.visit(url) html = browser.html soup = BeautifulSoup(html, 'html.parser') # Referenced: https://www.geeksforgeeks.org/image-scraping-with-python/ for item in soup.find_all('img', class_='thumbimg'): print(url + item['src']) browser.quit() url = 'https://galaxyfacts-mars.com/' # Use Pandas to scrape tabular data from url tables = pd.read_html(url) type(tables) tables[0] #Display df # Referenced: https://stackoverflow.com/questions/61736164/how-can-i-set-second-row-as-a-name-of-columns-in-dataframe mars_diagram_df = tables[0] mars_diagram_df.columns = mars_diagram_df.iloc[0] mars_diagram_df = mars_diagram_df.iloc[1:].reset_index(drop=True) mars_diagram_df.set_index("Mars - Earth Comparison", inplace=True) mars_diagram_df tables[1] #Display df mars_planet_profile_df = tables[1] # Rename df/set_index mars_planet_profile_df.columns = ["Description" , "Information"] mars_planet_profile_df.set_index("Description", inplace=True) mars_planet_profile_df # Setup splinter executable_path = {'executable_path': ChromeDriverManager().install()} browser = Browser('chrome', **executable_path, headless=False) # Setup splinter executable_path = {'executable_path': ChromeDriverManager().install()} browser = Browser('chrome', **executable_path, headless=False) url = 'https://marshemispheres.com/' browser.visit(url) html = browser.html soup = BeautifulSoup(html, 'html.parser') for item in soup.find_all('img', class_='thumb'): print(url + item['src']) # img_url # title
0.399226
0.292993
<a href="https://colab.research.google.com/github/Abhijit-2592/visualizing_cnns/blob/master/activation_maximization.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` %tensorflow_version 2.x ``` In this and the saliency_map.ipynb notebooks we will try to understand the visualization technique presented in the following paper: [Deep Inside Convolutional Networks: Visualising Image Classification Models and Saliency Maps](https://arxiv.org/pdf/1312.6034.pdf). Specifically we will visualize the idea given in Section 2: Class Model Visualisation The idea is: We are trying to find an input image such that the probability of the network predicting a specific class is maximum. This is again done by **gradient ascent**. ## NOTE: In the paper under section 3, the authors approximate the linear scoring function by using the 1st order term from the Tailor expansion of the softmax function (See equations 2 and 3). Instead of taking the 1st order term from the Tailor expansion we can do another trick to approximate a Linear scoring function: Just Swap the Softmax activation to a Linear/Identity activation in the output. ``` import tensorflow as tf import numpy as np import matplotlib.pyplot as plt np.random.seed(500) %matplotlib inline print(tf.__version__) print(np.__version__) model = tf.keras.applications.vgg16.VGG16(include_top=True, weights="imagenet") model.summary() # trick to get optimal visualizations: swap softmax with identity/linear model.get_layer("predictions").activation = None random_image = np.zeros((224, 224, 3)).astype(np.float32) plt.title("Random Image") plt.imshow(random_image) plt.show() random_image = np.expand_dims(random_image, axis=0) # reshape it to (1,224,224,3) def get_gradients(model, image, class_index): image_tensor = tf.convert_to_tensor(image, dtype="float32") with tf.GradientTape() as tape: # The visualization is very sensitive to the regularization parameter! regularizer_l2 = tf.keras.regularizers.l2(l=0.01) tape.watch(image_tensor) output = model(image_tensor) loss = tf.reduce_mean(output[:, class_index] - regularizer_l2(image_tensor)) grads = tape.gradient(loss, image_tensor) return grads ``` # Let's visualize the class Ouzel. It's imagenet index = 20 Ouzel is a bird: Just google it :P ``` step_size = 1 epochs = 1000 class_index = 20 # ouzel progbar = tf.keras.utils.Progbar(epochs) for i in range(epochs): grads = get_gradients(model, random_image, class_index=class_index) random_image += grads * step_size # + is gradient ascent progbar.update(i+1) def deprocess_image(x): """Utility function to convert a tensor into a valid image """ x = np.squeeze(x.numpy(), axis=0) x -= x.mean() x /= (x.std() + 1e-5) x *= 0.1 x += 0.5 x = np.clip(x, 0, 1) x *= 255 x = np.clip(x, 0, 255).astype('uint8') return x activation_maximization = deprocess_image(random_image) plt.figure(figsize=(6,6)) plt.imshow(activation_maximization) plt.show() ``` Ah well look at that! The network not only learnt what **Ouzel** Looks like but has also learnt different orientations and sizes of it. This shows that our classification network is scale and translational invarient. Thanks to Max pooling operations maybe? ;P
github_jupyter
%tensorflow_version 2.x import tensorflow as tf import numpy as np import matplotlib.pyplot as plt np.random.seed(500) %matplotlib inline print(tf.__version__) print(np.__version__) model = tf.keras.applications.vgg16.VGG16(include_top=True, weights="imagenet") model.summary() # trick to get optimal visualizations: swap softmax with identity/linear model.get_layer("predictions").activation = None random_image = np.zeros((224, 224, 3)).astype(np.float32) plt.title("Random Image") plt.imshow(random_image) plt.show() random_image = np.expand_dims(random_image, axis=0) # reshape it to (1,224,224,3) def get_gradients(model, image, class_index): image_tensor = tf.convert_to_tensor(image, dtype="float32") with tf.GradientTape() as tape: # The visualization is very sensitive to the regularization parameter! regularizer_l2 = tf.keras.regularizers.l2(l=0.01) tape.watch(image_tensor) output = model(image_tensor) loss = tf.reduce_mean(output[:, class_index] - regularizer_l2(image_tensor)) grads = tape.gradient(loss, image_tensor) return grads step_size = 1 epochs = 1000 class_index = 20 # ouzel progbar = tf.keras.utils.Progbar(epochs) for i in range(epochs): grads = get_gradients(model, random_image, class_index=class_index) random_image += grads * step_size # + is gradient ascent progbar.update(i+1) def deprocess_image(x): """Utility function to convert a tensor into a valid image """ x = np.squeeze(x.numpy(), axis=0) x -= x.mean() x /= (x.std() + 1e-5) x *= 0.1 x += 0.5 x = np.clip(x, 0, 1) x *= 255 x = np.clip(x, 0, 255).astype('uint8') return x activation_maximization = deprocess_image(random_image) plt.figure(figsize=(6,6)) plt.imshow(activation_maximization) plt.show()
0.711932
0.98948
# OpenACC Interoperability This lab is intended for Fortran programmers. If you prefer to use C/C++, click [this link.](../C/README.ipynb) --- ## Introduction The primary goal of this lab is to cover how to write an OpenACC code to work alongside other CUDA codes and accelerated libraries. There are several ways to make an OpenACC/CUDA interoperable code, and we will go through them one-by-one, with a short exercise for each. When programming in OpenACC, the distinction between CPU/GPU memory is abstracted. For the most part, you do not need to worry about explicitly differentiating between CPU and GPU pointers; the OpenACC runtime handles this for you. However, in CUDA, you do need to differentiate between these two types of pointers. Let's start with using CUDA allocated GPU data in our OpenACC code. --- ## OpenACC Deviceptr Clause The OpenACC `deviceptr` clause is used with the `data`, `parallel`, or `kernels` directives. It can be used in the same way as other data clauses such as `copyin`, `copyout`, `copy`, or `present`. The `deviceptr` clause is used to specify that a pointer is not a host pointer but rather a device pointer. This clause is important when working with OpenACC + CUDA interoperability because it is one way we can operate on CUDA allocated device data within an OpenACC code. Take the following example: **Allocation with CUDA Fortran** ```fortran integer, device, allocatable :: cuda_array(:) allocate(cuda_array(N)) ``` **Parallel Loop with OpenACC** ```fortran !$acc parallel loop deviceptr(cuda_array) do i = i, N cuda_array(i) = 0.0 end do ``` Normally, the OpenACC runtime expects to be given a host pointer, which will then be translated to some associated device pointer. However, when using CUDA to do our data management, we do not have that connection between host and device. The `deviceptr` clause is a way to tell the OpenACC runtime that a given pointer should not be translated since it is already a device pointer. This is a situation where CUDA Fortran has an advantage over CUDA C; since in CUDA Fortran you have to specify which arrays are on the device, the OpenACC runtime is often able fill in the blanks, and produce correct code results. Regardless, it is still proper design to use the deviceptr clause in such situations, and can avoid unexpected errors and confusion down the line. To practice using the deviceptr clause, we have a short exercise. We will examine two functions, both compute a dot product. The first code is [dot.f90](/edit/Fortran/deviceptr/dot.f90), which is a serial dot product. Next is [dot_acc.f90](/edit/Fortran/deviceptr/dot_acc.f90), which is an OpenACC parallelized version of dot. Both dot and dot_acc are called from [main.cuf](/edit/Fortran/deviceptr/main.cuf) (*note: .cuf is the conventional file extension for a CUDA Fortran source file*). In main.cu, we use host pointers to call dot, and device pointers to call dot_acc. Let's quickly run the code, it will produce an error. ``` !make -C deviceptr ``` To fix this error, we must edit [dot_acc.f90](/edit/Fortran/deviceptr/dot_acc.f90) and specify that arrays `A`, `B`, and `C` are on the device (`real, device, intent(in) :: A(m,n)`, for example). And while the code may not need it to work correctly, add the deviceptr clause to the parallel loop. When you think you have it, run the code below and see if the error is fixed. ``` !make -C deviceptr ``` Next, let's do the opposite. Let's take data that was allocated with OpenACC, and use it in a CUDA function. 
---

## OpenACC host_data directive

The `host_data` directive is used to make the OpenACC mapped device address available to the host. There are a few clauses that can be used with host_data, but the one that we are interested in using is `use_device`. We will use the `host_data` directive with the `use_device` clause to grab the underlying device pointer that OpenACC usually abstracts for us. Then we can use this device pointer to pass to CUDA kernels or to use accelerated libraries. Let's look at a code example:

**Inside CUDA Fortran Code**

```fortran
module example_cuda_m
contains
  attributes(global) subroutine example_kernel(A, size)
    ! Kernel Code
  end subroutine

  subroutine example_cuda(A, size)
    use cudafor
    real, device, intent(in) :: A(:)
    integer, intent(in) :: size

    call example_kernel<<<512, 128>>>(A, size)
    return
  end subroutine
end module example_cuda_m
```

**Inside OpenACC Code**

```fortran
program main
  use example_cuda_m
  real, allocatable :: A(:)

  allocate( A(100) )

  !$acc data create(A(:))

  !$acc host_data use_device(A)
  call example_cuda(A, 100)
  !$acc end host_data

  !$acc end data
end program
```

A brief rundown of what is actually happening under-the-hood: the `data` directive creates a device copy of the array `A`, and the host pointer of `A` is linked to the device pointer of `A`. This is typical OpenACC behavior. Next, the `host_data use_device` translates the `A` variable on the host to reference the device copy of the data so that we can pass it to our CUDA function.

To practice this, let's work on another code. We still have [dot.f90](/edit/Fortran/host_data/dot.f90) for our serial code. But instead of an OpenACC version of `dot`, we have a CUDA version in [dot_cuda.cuf](/edit/Fortran/host_data/dot_cuda.cuf). Both of these functions are called in [main.f90](/edit/Fortran/host_data/main.f90). First, let's run the code and see the error.

```
!make -C host_data
```

Now edit [main.f90](/edit/Fortran/host_data/main.f90) and use `host_data` and `use_device` to pass device pointers when calling our CUDA function. When you're ready, rerun the code below, and see if the error is fixed.

```
!make -C host_data
```

---

## Using cuBLAS with OpenACC

We are also able to use accelerated libraries with `host_data use_device` as well. Just like the previous section, we can allocate the data with OpenACC using either the `data` or `enter data` directive. Then, pass that data to a cuBLAS call with `host_data`.

This code is slightly different than before; we will be working on a matrix multiplication code. The serial code is found in [matmult.f90](/edit/Fortran/cublas/matmult.f90). The cuBLAS code is in [matmult_cublas.f90](/edit/Fortran/cublas/matmult_cublas.f90). Both of these are called from [main.f90](/edit/Fortran/cublas/main.f90). Let's try running the code and seeing the error.

```
!make -C cublas
```

Now, edit [main.f90](/edit/Fortran/cublas/main.f90) and use `host_data` and `use_device` on the cublas call (similar to what you did in the previous exercise). Rerun the code below when you're ready, and see if the error is fixed.

```
!make -C cublas
```

---

## OpenACC map_data

We briefly mentioned earlier how OpenACC creates a mapping between host and device memory. When using CUDA allocated memory within OpenACC, that mapping is not created automatically, but it can be created manually. We are able to map a host address to a device address by using the OpenACC `acc_map_data(host_array, device_array, length_in_bytes)` function.
Then, before the data is unallocated, you will use `acc_unmap_data(host_array)` to undo the mapping. Let's look at a quick example. ```fortran real, allocatable :: A(:) real, device, allocatable :: A_d(:) allocate( A(N) ) allocate( A_d(N) ) call acc_map_data(A, A_d, N*4) !$acc parallel loop present(A(:)) do i = 1, N A(i) = 0.0 end do call acc_unmap_data(A) deallocate( A ) deallocate( A_d ) ``` To practice, we have another example code which uses the `dot` functions again. Serial `dot` is in [dot.f90](/edit/Fortran/map/dot.f90). OpenACC `dot` is in [dot_acc.f90](/edit/Fortran/map/dot_acc.f90). Both of them are called from [main.cuf](/edit/Fortran/map/main.cuf). Try running the code and see the error. ``` !make -C map ``` Now, edit [main.cuf](/edit/Fortran/map/main.cuf) and add the OpenACC mapping functions before any of our OpenACC directives. When you're ready, rerun the code below and see if the error is fixed. ``` !make -C map ``` --- ## Routine The last topic to discuss is using CUDA `__device__` functions within OpenACC parallel and kernels regions. These are functions that are compiled to be called from the accelerator within a GPU kernel or OpenACC region. If you want to compile an OpenACC function to be used on the device, you will use the `routine` directive with the following syntax: ```fortran real function sqab(a) !$acc routine seq real :: a sqab = sqrt(abs(a)) end function ``` You can also have a function with a loop you want to parallelize like so: ```fortran subroutine test( x, n ) !$acc routine vector real, dimension(*) :: x integer :: n integer :: i !$acc loop vector do i=1, n x(i) = 0.0 enddo ``` To use CUDA device functions within our OpenACC loops, we can also use the `routine` directive. See the following example: **In CUDA Code** ```fortran module dist_cuda_m contains attributes(device) function dist_cuda(a, b) !$acc routine seq real :: a, b dist_cuda = sqrt(a*a + b*b) end function dist_cuda end module dist_cuda_m ``` **In OpenACC Code** ```fortran module distance_map_acc_m use dist_cuda_m use cudafor contains subroutine distance_map_acc(A, B, C, m, n) implicit none real, intent(in) :: A(:) real, intent(in) :: B(:) real, intent(inout) :: C(:,:) integer, intent(in) :: m, n integer :: i, j !$acc parallel loop copyin(A, B) copyout(C) do j = 1,m !$acc loop do i = 1,n C(i,j) = dist_cuda(A(j), B(i) ) end do end do end subroutine distance_map_acc end module distance_map_acc_m ``` Fortunately, if you're using CUDA Fortran (and depending on the compiler) you may not need to even include explicit routine information. However, it is considered proper to include routine information in these kinds of situations. --- ## Bonus Task Here are some additional resources for OpenACC/CUDA interoperability: [This is an NVIDIA devblog about some common techniques for implementing OpenACC + CUDA](https://devblogs.nvidia.com/3-versatile-openacc-interoperability-techniques/) [This is a github repo with some additional code examples demonstrating the lessons covered in this lab](https://github.com/jefflarkin/openacc-interoperability) --- ## Post-Lab Summary If you would like to download this lab for later viewing, it is recommend you go to your browsers File menu (not the Jupyter notebook file menu) and save the complete web page. This will ensure the images are copied down as well. You can also execute the following cell block to create a zip-file of the files you've been working on, and download it with the link below. 
``` %%bash rm -f openacc_files.zip zip -r openacc_files.zip * ``` **After** executing the above zip command, you should be able to download the zip file [here](files/openacc_files.zip)
# Launch Turi Create ``` import turicreate ``` # Load house sales data ``` sales = turicreate.SFrame('home_data.sframe/') len(sales) sales.head(5) ``` # Explore ``` sales.show() turicreate.show(sales[1:5000]['sqft_living'],sales[1:5000]['price']) ``` # Simple regression model that predicts price from square feet ``` training_set, test_set = sales.random_split(.8,seed=0) ``` ## train simple regression model ``` sqft_model = turicreate.linear_regression.create(training_set,target='price',features=['sqft_living']) ``` # Evaluate the quality of our model ``` print (test_set['price'].mean()) print (sqft_model.evaluate(test_set)) ``` # Explore model a little further ``` sqft_model.coefficients import matplotlib.pyplot as plt plt.figure(figsize=(16,9)) plt.plot(test_set['sqft_living'],test_set['price'],'.') plt.plot(test_set['sqft_living'],sqft_model.predict(test_set),'-') plt.grid(True) ``` # Explore other features of the data ``` my_features = ['bedrooms','bathrooms','sqft_living','sqft_lot','floors','zipcode'] sales[my_features].show() turicreate.show(sales['zipcode'],sales['price']) ``` # Build a model with these additional features ``` my_features_model = turicreate.linear_regression.create(training_set,target='price',features=my_features) ``` # Compare simple model with more complex one ``` print (my_features) print (sqft_model.evaluate(test_set)) print (my_features_model.evaluate(test_set)) ``` # Apply learned models to make predictions ``` house1 = sales[sales['id']=='5309101200'] house1 ``` <img src="http://blue.kingcounty.com/Assessor/eRealProperty/MediaHandler.aspx?Media=2916871"> ``` print (house1['price']) print (sqft_model.predict(house1)) print (my_features_model.predict(house1)) ``` ## Prediction for a second house, a fancier one ``` house2 = sales[sales['id']=='1925069082'] house2 ``` <img src="https://ssl.cdn-redfin.com/photo/1/bigphoto/302/734302_0.jpg"> ``` print (sqft_model.predict(house2)) print (my_features_model.predict(house2)) ``` ## Prediction for a super fancy home ``` bill_gates = {'bedrooms':[8], 'bathrooms':[25], 'sqft_living':[50000], 'sqft_lot':[225000], 'floors':[4], 'zipcode':['98039'], 'condition':[10], 'grade':[10], 'waterfront':[1], 'view':[4], 'sqft_above':[37500], 'sqft_basement':[12500], 'yr_built':[1994], 'yr_renovated':[2010], 'lat':[47.627606], 'long':[-122.242054], 'sqft_living15':[5000], 'sqft_lot15':[40000]} ``` <img src="https://upload.wikimedia.org/wikipedia/commons/2/26/Residence_of_Bill_Gates.jpg"> ``` print (my_features_model.predict(turicreate.SFrame(bill_gates))) ```
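The `evaluate` calls above report error metrics computed by Turi Create itself. As a quick sanity check, the same kind of number can be recomputed by hand from the model's predictions. The sketch below is illustrative only and assumes the SFrame/SArray columns can be converted to plain lists by iterating over them.

```
import numpy as np

# Recompute RMSE by hand from the test-set predictions of the multi-feature
# model, as a sanity check on the numbers reported by evaluate().
predictions = np.array(list(my_features_model.predict(test_set)))
actual = np.array(list(test_set['price']))
errors = predictions - actual

print('RMSE:', np.sqrt(np.mean(errors ** 2)))
print('Max abs error:', np.max(np.abs(errors)))
```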
# Lists Lists in Python represent ordered sequences of values. Here is an example of how to create them: ``` primes = [2, 3, 5, 7] ``` We can put other types of things in lists: ``` planets = ['Mercury', 'Venus', 'Earth', 'Mars', 'Jupiter', 'Saturn', 'Uranus', 'Neptune'] print(planets) ``` We can even make a list of lists: ``` hands = [ ['J', 'Q', 'K'], ['2', '2', '2'], ['6', 'A', 'K'], # (Comma after the last element is optional) ] # (I could also have written this on one line, but it can get hard to read) hands = [['J', 'Q', 'K'], ['2', '2', '2'], ['6', 'A', 'K']] print(hands) ``` A list can contain a mix of different types of variables: ``` my_favourite_things = [32, 'raindrops on roses', help] # (Yes, Python's help function is *definitely* one of my favourite things) print(my_favourite_things) ``` ## Indexing You can access individual list elements with square brackets. Which planet is closest to the sun? Python uses *zero-based* indexing, so the first element has index 0. ``` planets[0] ``` What's the next closest planet? ``` planets[1] ``` Which planet is *furthest* from the sun? Elements at the end of the list can be accessed with negative numbers, starting from -1: ``` planets[-1] planets[-2] ``` ## Slicing What are the first three planets? We can answer this question using *slicing*: ``` planets[0:3] ``` `planets[0:3]` is our way of asking for the elements of `planets` starting from index 0 and continuing up to *but not including* index 3. The starting and ending indices are both optional. If I leave out the start index, it's assumed to be 0. So I could rewrite the expression above as: ``` planets[:3] ``` If I leave out the end index, it's assumed to be the length of the list. ``` planets[3:] ``` i.e. the expression above means "give me all the planets from index 3 onward". We can also use negative indices when slicing: ``` # All the planets except the first and last planets[1:-1] # The last 3 planets planets[-3:] ``` ## Changing lists Lists are "mutable", meaning they can be modified "in place". One way to modify a list is to assign to an index or slice expression. For example, let's say we want to rename Mars: ``` planets[3] = 'Malacandra' planets planets[0] = 'alamin' planets ``` Hm, that's quite a mouthful. Let's compensate by shortening the names of the first 3 planets. ``` planets[:3] = ['Mur', 'Vee', 'Ur'] print(planets) # That was silly. Let's give them back their old names planets[:4] = ['Mercury', 'Venus', 'Earth', 'Mars',] ``` ## List functions Python has several useful functions for working with lists. `len` gives the length of a list: ``` # How many planets are there? len(planets) ``` `sorted` returns a sorted version of a list: ``` # The planets sorted in alphabetical order sorted(planets) ``` `sum` does what you might expect: ``` primes = [2, 3, 5, 7] sum(primes) min(primes) max(primes) ``` We've previously used the `min` and `max` to get the minimum or maximum of several arguments. But we can also pass in a single list argument. ``` max(primes) ``` ## Interlude: objects I've used the term 'object' a lot so far - you may have even read that *everything* in Python is an object. What does that mean? In short, objects carry some things around with them. You access that stuff using Python's dot syntax. For example, numbers in Python carry around an associated variable called `imag` representing their imaginary part. (You'll probably never need to use this unless you're doing some very weird math.) ``` x = 12 # x is a real number, so its imaginary part is 0. 
print(x.imag) # Here's how to make a complex number, in case you've ever been curious: c = 12 + 3j print(c.imag) ``` The things an object carries around can also include functions. A function attached to an object is called a **method**. (Non-function things attached to an object, such as `imag`, are called *attributes*). For example, numbers have a method called `bit_length`. Again, we access it using dot syntax: ``` x.bit_length ``` To actually call it, we add parentheses: ``` x.bit_length() ``` > **Aside:** You've actually been calling methods already if you've been doing the exercises. In the exercise notebooks `q1`, `q2`, `q3`, etc. are all objects which have methods called `check`, `hint`, and `solution`. In the same way that we can pass functions to the `help` function (e.g. `help(max)`), we can also pass in methods: ``` help(x.bit_length) ``` <!-- TODO: dir? A useful builtin method for interacting with objects is `dir`. `dir` asks: what are the names of all the things (methods, and attributes) that this object is carrying around? help(x)? --> The examples above were utterly obscure. None of the types of objects we've looked at so far (numbers, functions, booleans) have attributes or methods you're likely ever to use. But it turns out that lists have several methods which you'll use all the time. ## List methods `list.append` modifies a list by adding an item to the end: ``` # Pluto is a planet darn it! planets.append('Pluto') ``` Why does the cell above have no output? Let's check the documentation by calling `help(planets.append)`. > **Aside:** `append` is a method carried around by *all* objects of type list, not just `planets`, so we also could have called `help(list.append)`. However, if we try to call `help(append)`, Python will complain that no variable exists called "append". The "append" name only exists within lists - it doesn't exist as a standalone name like builtin functions such as `max` or `len`. ``` help(planets.append) ``` The `-> None` part is telling us that `list.append` doesn't return anything. But if we check the value of `planets`, we can see that the method call modified the value of `planets`: ``` planets ``` `list.pop` removes and returns the last element of a list: ``` planets.pop() planets ``` ### Searching lists Where does Earth fall in the order of planets? We can get its index using the `list.index` method. ``` planets.index('Earth') ``` It comes third (i.e. at index 2 - 0 indexing!). At what index does Pluto occur? ``` planets.index('Pluto') ``` Oh, that's right... To avoid unpleasant surprises like this, we can use the `in` operator to determine whether a list contains a particular value: ``` # Is Earth a planet? "Earth" in planets # Is Calbefraques a planet? "Calbefraques" in planets ``` There are a few more interesting list methods we haven't covered. If you want to learn about all the methods and attributes attached to a particular object, we can call `help()` on the object itself. For example, `help(planets)` will tell us about *all* the list methods: ``` help(planets) ``` Click the "output" button to see the full help page. Lists have lots of methods with weird-looking names like `__eq__` and `__iadd__`. Don't worry too much about these for now. (You'll probably never call such methods directly. But they get called behind the scenes when we use syntax like indexing or comparison operators.) The most interesting methods are toward the bottom of the list (`append`, `clear`, `copy`, etc.). ## Tuples Tuples are almost exactly the same as lists. 
They differ in just two ways. **1:** The syntax for creating them uses parentheses instead of square brackets ``` t = (1, 2, 3) t = 1, 2, 3 # equivalent to above t ``` **2:** They cannot be modified (they are *immutable*). ``` t[0] = 100 ``` Tuples are often used for functions that have multiple return values. For example, the ``as_integer_ratio()`` method of float objects returns a numerator and a denominator in the form of a tuple: ``` x = 0.125 x.as_integer_ratio() ``` These multiple return values can be individually assigned as follows: ``` numerator, denominator = x.as_integer_ratio() print(numerator / denominator) ``` Finally we have some insight into the classic Stupid Python Trick™ for swapping two variables! ``` a = 1 b = 0 a, b = b, a print(a, b) ``` # Your Turn You learn best by writing code, not just reading it. So try **[the coding challenge](https://www.kaggle.com/kernels/fork/1275173)** now. --- *Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum/161283) to chat with other Learners.*
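One practical consequence of list mutability that is worth calling out is aliasing: assigning a list to a new name does not copy it. The short example below is an addition to the lesson, not part of the original exercise.

```
a = [1, 2, 3]
b = a          # b is another name for the *same* list object
b.append(4)
print(a)       # [1, 2, 3, 4] -- a changed too, because a and b refer to one object

c = list(a)    # list(a) (or a[:]) makes a shallow copy
c.append(5)
print(a)       # [1, 2, 3, 4] -- unchanged
print(c)       # [1, 2, 3, 4, 5]

# Tuples avoid this kind of surprise entirely: they cannot be modified,
# so sharing one between several names is always safe.
t = (1, 2, 3)
# t.append(4)  # would raise AttributeError
```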
``` import torch import torch.optim as optim import torchvision.transforms as transforms import torch.nn as nn import torch.nn.functional as F import numpy as np import time import matplotlib import matplotlib.pyplot as plt import dataset.dataset as dataset import datasplit.datasplit as datasplit import model.models as models import trainer.trainer as trainer import utils.utils as utils torch.cuda.device_count() cuda0 = torch.device('cuda:0') cuda1 = torch.device('cuda:1') cuda2 = torch.device('cuda:2') cuda3 = torch.device('cuda:3') device = torch.device(cuda0 if torch.cuda.is_available() else "cpu") ``` # INIT ``` # transforms transform = transforms.Compose([ transforms.ToTensor(), ]) # dataset root = '/Volumes/Macintosh HD/DATASETS/GUITAR-FX/Mono_Discrete' excl_folders = ['MT2'] spectra_folder= 'mel_22050_1024_512' proc_settings_csv = 'proc_settings.csv' max_num_settings=3 dataset = dataset.FxDataset(root=root, excl_folders=excl_folders, spectra_folder=spectra_folder, processed_settings_csv=proc_settings_csv, max_num_settings=max_num_settings, transform=transform) dataset.init_dataset() # dataset.generate_mel() # split split = datasplit.DataSplit(dataset, shuffle=True) # loaders train_loader, val_loader, test_loader = split.get_split(batch_size=100) print('dataset size: ', len(dataset)) print('train set size: ', len(split.train_sampler)) print('val set size: ', len(split.val_sampler)) print('test set size: ', len(split.test_sampler)) dataset.fx_to_label ``` # TRAIN MultiNET ``` # model net = models.MultiNet(n_classes=dataset.num_fx, n_settings=dataset.max_num_settings).to(device) # optimizer optimizer = optim.Adam(net.parameters(), lr=0.001) # loss function loss_func_fx = nn.CrossEntropyLoss() loss_func_set = nn.MSELoss(reduction='mean') print(net) print('Trainable Params: ', sum(p.numel() for p in net.parameters() if p.requires_grad)) # SAVE models_folder = '../../saved/models' model_name = '20201110_multinet_mono_disc_best' results_folder = '../../saved/results' results_subfolder = '20201110_multinet_mono_disc' # TRAIN and TEST MultiNet OVER MULTIPLE EPOCHS train_set_size = len(split.train_sampler) val_set_size = len(split.val_sampler) test_set_size = len(split.test_sampler) all_train_losses, all_val_losses, all_test_losses = [],[],[] all_train_correct, all_val_correct, all_test_correct = [],[],[] all_train_results, all_val_results, all_test_results = [],[],[] best_val_correct = 0 start = time.time() for epoch in range(50): train_loss, train_correct, train_results = trainer.train_multi_net( model=net, optimizer=optimizer, train_loader=train_loader, train_sampler=split.train_sampler, epoch=epoch, loss_function_fx=loss_func_fx, loss_function_set=loss_func_set, device=device ) val_loss, val_correct, val_results = trainer.val_multi_net( model=net, val_loader=val_loader, val_sampler=split.val_sampler, loss_function_fx=loss_func_fx, loss_function_set=loss_func_set, device=device ) test_loss, test_correct, test_results = trainer.test_multi_net( model=net, test_loader=test_loader, test_sampler=split.test_sampler, loss_function_fx=loss_func_fx, loss_function_set=loss_func_set, device=device ) # save model if val_correct > best_val_correct: best_val_correct = val_correct torch.save(net, '%s/%s' % (models_folder, model_name)) print('\n=== saved best model ===\n') # append results all_train_losses.append(train_loss) all_val_losses.append(val_loss) all_test_losses.append(test_loss) all_train_correct.append(train_correct) all_val_correct.append(val_correct) all_test_correct.append(test_correct) 
all_train_results.append(train_results) all_val_results.append(val_results) all_test_results.append(test_results) stop = time.time() print(f"Training time: {stop - start}s") # BEST RESULTS print('Accuracy: ', 100 * max(all_train_correct) / train_set_size) print('Epoch: ', np.argmax(all_train_correct)) print() print('Accuracy: ', 100 * max(all_val_correct) / val_set_size) print('Epoch: ', np.argmax(all_val_correct)) print() print('Accuracy: ', 100 * max(all_test_correct) / test_set_size) print('Epoch: ', np.argmax(all_test_correct)) print() # SAVE RESULTS - all losses, all correct, best results all_train_losses_npy = np.array(all_train_losses) all_train_correct_npy = np.array(all_train_correct) best_train_results_npy = np.array(all_train_results[47]) all_val_losses_npy = np.array(all_val_losses) all_val_correct_npy = np.array(all_val_correct) best_val_results_npy = np.array(all_val_results[47]) all_test_losses_npy = np.array(all_test_losses) all_test_correct_npy = np.array(all_test_correct) best_test_results_npy = np.array(all_test_results[47]) fx_labels_npy = np.array(list(dataset.fx_to_label.keys())) np.save(file=('%s/%s/%s' % (results_folder, results_subfolder, 'all_train_losses')), arr=all_train_losses_npy) np.save(file=('%s/%s/%s' % (results_folder, results_subfolder, 'all_train_correct')), arr=all_train_correct_npy) np.save(file=('%s/%s/%s' % (results_folder, results_subfolder, 'best_train_results')), arr=best_train_results_npy) np.save(file=('%s/%s/%s' % (results_folder, results_subfolder, 'all_val_losses')), arr=all_val_losses_npy) np.save(file=('%s/%s/%s' % (results_folder, results_subfolder, 'all_val_correct')), arr=all_val_correct_npy) np.save(file=('%s/%s/%s' % (results_folder, results_subfolder, 'best_val_results')), arr=best_val_results_npy) np.save(file=('%s/%s/%s' % (results_folder, results_subfolder, 'all_test_losses')), arr=all_test_losses_npy) np.save(file=('%s/%s/%s' % (results_folder, results_subfolder, 'all_test_correct')), arr=all_test_correct_npy) np.save(file=('%s/%s/%s' % (results_folder, results_subfolder, 'best_test_results')), arr=best_test_results_npy) np.save(file=('%s/%s/%s' % (results_folder, results_subfolder, 'fx_labels')), arr=fx_labels_npy) ```
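The cells above index the per-epoch result lists with a hardcoded epoch (47). A slightly more robust variant, sketched below, derives the best epoch from the recorded validation accuracy instead; it uses only variables already defined in this notebook and is offered as a suggestion rather than part of the original experiment.

```
import numpy as np

# Pick the epoch with the highest validation accuracy instead of hardcoding it.
best_epoch = int(np.argmax(all_val_correct))
print('best epoch by validation accuracy:', best_epoch)

best_train_results_npy = np.array(all_train_results[best_epoch])
best_val_results_npy = np.array(all_val_results[best_epoch])
best_test_results_npy = np.array(all_test_results[best_epoch])
```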
# Sentiment Analysis: Using a Recurrent Neural Network

Text classification is a common task in natural language processing: it transforms a variable-length text sequence into a category. This section looks at one of its sub-problems: using text sentiment classification to analyze the emotions of the text's author. This problem is also called sentiment analysis and has a wide range of applications. For example, we can analyze user reviews of a product to gauge user satisfaction, or analyze user sentiment about market conditions to predict where the market will go next.

Like finding synonyms and analogies, text classification is a downstream application of word embeddings. In this section we will apply pretrained word vectors and a bidirectional recurrent neural network with multiple hidden layers to decide whether a variable-length text sequence carries positive or negative sentiment.

Before the experiment starts, import the required packages and modules.

```
import collections
import d2lzh as d2l
from mxnet import gluon, init, nd
from mxnet.contrib import text
from mxnet.gluon import data as gdata, loss as gloss, nn, rnn, utils as gutils
import os
import random
import tarfile
```

## The Text Sentiment Classification Dataset

We use Stanford's Large Movie Review Dataset (IMDb) as the dataset for sentiment classification [1]. It is split into a training set and a test set, each containing 25,000 movie reviews downloaded from IMDb. In each set the numbers of reviews labeled "positive" and "negative" are equal.

### Reading the Data

First download the dataset to the `../data` path, then extract it to `../data/aclImdb`.

```
# This function is saved in the d2lzh package for later use
def download_imdb(data_dir='../data'):
    url = ('http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz')
    sha1 = '01ada507287d82875905620988597833ad4e0903'
    fname = gutils.download(url, data_dir, sha1_hash=sha1)
    with tarfile.open(fname, 'r') as f:
        f.extractall(data_dir)

download_imdb()
```

Next, read the training and test datasets. Each example is a review and its label: 1 means "positive" and 0 means "negative".

```
def read_imdb(folder='train'):  # This function is saved in the d2lzh package for later use
    data = []
    for label in ['pos', 'neg']:
        folder_name = os.path.join('../data/aclImdb/', folder, label)
        for file in os.listdir(folder_name):
            with open(os.path.join(folder_name, file), 'rb') as f:
                review = f.read().decode('utf-8').replace('\n', '').lower()
                data.append([review, 1 if label == 'pos' else 0])
    random.shuffle(data)
    return data

train_data, test_data = read_imdb('train'), read_imdb('test')
```

### Preprocessing the Data

We need to tokenize every review. The `get_tokenized_imdb` function defined here uses the simplest method: splitting on whitespace.

```
def get_tokenized_imdb(data):  # This function is saved in the d2lzh package for later use
    def tokenizer(text):
        return [tok.lower() for tok in text.split(' ')]
    return [tokenizer(review) for review, _ in data]
```

Now we can build a vocabulary from the tokenized training data. Here we filter out words that appear fewer than 5 times.

```
def get_vocab_imdb(data):  # This function is saved in the d2lzh package for later use
    tokenized_data = get_tokenized_imdb(data)
    counter = collections.Counter([tk for st in tokenized_data for tk in st])
    return text.vocab.Vocabulary(counter, min_freq=5)

vocab = get_vocab_imdb(train_data)
'# words in vocab:', len(vocab)
```

Because the reviews have different lengths they cannot be combined directly into minibatches, so we define the `preprocess_imdb` function to tokenize each review, map it to word indices through the vocabulary, and then truncate or pad it so that every review has a fixed length of 500.

```
def preprocess_imdb(data, vocab):  # This function is saved in the d2lzh package for later use
    max_l = 500  # Truncate or pad every review so that its length becomes 500

    def pad(x):
        return x[:max_l] if len(x) > max_l else x + [0] * (max_l - len(x))

    tokenized_data = get_tokenized_imdb(data)
    features = nd.array([pad(vocab.to_indices(x)) for x in tokenized_data])
    labels = nd.array([score for _, score in data])
    return features, labels
```

### Creating Data Iterators

Now we create the data iterators. Every iteration returns one minibatch of data.

```
batch_size = 64
train_set = gdata.ArrayDataset(*preprocess_imdb(train_data, vocab))
test_set = gdata.ArrayDataset(*preprocess_imdb(test_data, vocab))
train_iter = gdata.DataLoader(train_set, batch_size, shuffle=True)
test_iter = gdata.DataLoader(test_set, batch_size)
```

Print the shape of the first minibatch and the number of minibatches in the training set.

```
for X, y in train_iter:
    print('X', X.shape, 'y', y.shape)
    break
'#batches:', len(train_iter)
```

## A Model Based on a Recurrent Neural Network

In this model, each word first passes through the embedding layer to obtain a feature vector. Then we encode the feature sequence further with a bidirectional recurrent neural network to obtain sequence information. Finally, we transform the encoded sequence information into the output through a fully connected layer. Concretely, we concatenate the hidden states of the bidirectional LSTM at the initial and final time steps and pass this, as the representation of the feature sequence, to the output layer for classification. In the `BiRNN` class implemented below, the `Embedding` instance is the embedding layer, the `LSTM` instance is the hidden layer that encodes the sequence, and the `Dense` instance is the output layer that produces the classification result.

```
class BiRNN(nn.Block):
    def __init__(self, vocab, embed_size, num_hiddens, num_layers, **kwargs):
        super(BiRNN, self).__init__(**kwargs)
        self.embedding = nn.Embedding(len(vocab), embed_size)
        # Setting bidirectional to True gives a bidirectional recurrent neural network
        self.encoder = rnn.LSTM(num_hiddens, num_layers=num_layers,
                                bidirectional=True, input_size=embed_size)
        self.decoder = nn.Dense(2)

    def forward(self, inputs):
        # The shape of inputs is (batch size, number of words). Because the LSTM needs the
        # sequence as its first dimension, the input is transposed before the word features
        # are extracted. The output shape is (number of words, batch size, embedding size).
        embeddings = self.embedding(inputs.T)
        # Since rnn.LSTM only receives the input embeddings, it returns only the hidden states
        # of the last hidden layer at all time steps.
        # The shape of outputs is (number of words, batch size, 2 * number of hidden units).
        outputs = self.encoder(embeddings)
        # Concatenate the hidden states at the initial and final time steps as the input of
        # the fully connected layer. Its shape is (batch size, 4 * number of hidden units).
        encoding = nd.concat(outputs[0], outputs[-1])
        outs = self.decoder(encoding)
        return outs
```

Create a bidirectional recurrent neural network with two hidden layers.

```
embed_size, num_hiddens, num_layers, ctx = 100, 100, 2, d2l.try_all_gpus()
net = BiRNN(vocab, embed_size, num_hiddens, num_layers)
net.initialize(init.Xavier(), ctx=ctx)
```

### Loading Pretrained Word Vectors

Because the training dataset for sentiment classification is not very large, we use word vectors pretrained on a much larger corpus as the feature vector of every word in order to cope with overfitting. Here we load a 100-dimensional GloVe vector for every word in the vocabulary `vocab`.

```
glove_embedding = text.embedding.create(
    'glove', pretrained_file_name='glove.6B.100d.txt', vocabulary=vocab)
```

Then we use these word vectors as the feature vectors of every word in the reviews. Note that the dimension of the pretrained word vectors must match the embedding layer output size `embed_size` of the model we created. In addition, we do not update these word vectors during training.

```
net.embedding.weight.set_data(glove_embedding.idx_to_vec)
net.embedding.collect_params().setattr('grad_req', 'null')
```

### Training and Evaluating the Model

Now we can start training the model.

```
lr, num_epochs = 0.01, 5
trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': lr})
loss = gloss.SoftmaxCrossEntropyLoss()
d2l.train(train_iter, test_iter, net, loss, trainer, ctx, num_epochs)
```

Finally, define the prediction function.

```
# This function is saved in the d2lzh package for later use
def predict_sentiment(net, vocab, sentence):
    sentence = nd.array(vocab.to_indices(sentence), ctx=d2l.try_gpu())
    label = nd.argmax(net(sentence.reshape((1, -1))), axis=1)
    return 'positive' if label.asscalar() == 1 else 'negative'
```

Use the trained model to classify the sentiment of two simple sentences.

```
predict_sentiment(net, vocab, ['this', 'movie', 'is', 'so', 'great'])

predict_sentiment(net, vocab, ['this', 'movie', 'is', 'so', 'bad'])
```

## Summary

* Text classification transforms a variable-length text sequence into a category. It is a downstream application of word embeddings.
* Pretrained word vectors and a recurrent neural network can be used to classify the sentiment of a text.

## Exercises

* Increase the number of epochs. What accuracy does the trained model reach on the training and test sets? What happens if you also tune the other hyperparameters?
* Can you improve the classification accuracy by using larger pretrained word vectors, such as 300-dimensional GloVe vectors?
* Can you improve the classification accuracy by using the spaCy tokenizer? You need to install spaCy (`pip install spacy`) and its English package (`python -m spacy download en`). In the code, first import spaCy (`import spacy`), then load the English package (`spacy_en = spacy.load('en')`), and finally define the function `def tokenizer(text): return [tok.text for tok in spacy_en.tokenizer(text)]` and use it to replace the original whitespace-based `tokenizer`. Note that GloVe stores noun phrases by joining the individual words with "-"; for example, the phrase "new york" is represented as "new-york" in GloVe, whereas after spaCy tokenization "new york" may remain "new york".

## References

[1] Maas, A. L., Daly, R. E., Pham, P. T., Huang, D., Ng, A. Y., & Potts, C. (2011, June). Learning word vectors for sentiment analysis. In Proceedings of the 49th annual meeting of the association for computational linguistics: Human language technologies-volume 1 (pp. 142-150). Association for Computational Linguistics.

## Scan the QR code to reach the [discussion forum](https://discuss.gluon.ai/t/topic/6155)

![](../img/qr_sentiment-analysis.svg)
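As a starting point for the last exercise, the spaCy-based replacement for the whitespace `tokenizer` described above might look like the following sketch (it assumes spaCy and its English package have been installed with the commands given in the exercise):

```
import spacy

spacy_en = spacy.load('en')  # the English package installed via `python -m spacy download en`

def tokenizer(text):
    # Tokenize with spaCy instead of splitting on whitespace
    return [tok.text for tok in spacy_en.tokenizer(text)]

print(tokenizer('this movie is so great'))
```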
## Chapter 4.1 The four branches of machine learning

### Supervised learning
- Given sample data, the model learns to map input data to known targets (annotations).
- Main part: classification and regression
- Sequence generation: given a picture, generate a caption describing it (can be treated as a series of classification problems).
- Syntax tree prediction: given a sentence, predict its decomposed syntax tree.
- Object detection: given a picture, draw a bounding box around particular objects in it (regression + classification).
- Image segmentation: given a picture, produce a pixel-level mask over particular objects.

### Unsupervised learning
- Dimensionality reduction and clustering

### Self-supervised learning
- Autoencoders

### Reinforcement learning

## Chapter 4.2 Evaluating machine-learning models
- Overfitting to the validation set: information leak. Every time you tune the model's hyperparameters based on its performance on the validation set, some information about the validation data leaks into the model.
- Simple hold-out validation, K-fold cross-validation, and iterated K-fold cross-validation with shuffling (e.g. RepeatedStratifiedKFold)

### Things to keep in mind
- Data representativeness: for example, with image digit data, splitting the ordered data 80:20 leaves only classes 0-7 in the training set; you need to shuffle. You can pass the target labels to the `stratify` parameter of `train_test_split()`.
- The arrow of time: for time-series data, all of the data in the test set must be later (more recent) than the data in the training set.
- Redundancy in the data: if a data point appears twice in a dataset, it can end up in both the training and test sets. Check for duplicates! Apply the `GroupKFold` class with the `cross_validate()` function.

## Chapter 4.3 Data preprocessing, feature engineering, and feature learning

### Data preprocessing for neural networks

#### Vectorization (data vectorization)
e.g. one-hot encoding

#### Value normalization

```
x -= x.mean(axis=0)  # mean 0
x /= x.std(axis=0)  # standard deviation 1
```

#### Handling missing values
In a neural network, as long as 0 is not already a predefined meaningful value, it is safe to input missing values as 0: the network learns to ignore them (it learns that 0 means "missing").

#### Feature engineering
- Good features let you solve a problem more elegantly while using fewer resources. For example, using a convolutional neural network to read the hands of a clock is a poor fit.
- Good features let you solve a problem with less data. A deep-learning model's ability to learn features on its own only pays off when plenty of training data is available; when there are few samples, the information carried by the features becomes very important.

## Chapter 4.4 Overfitting and underfitting

### Reducing the network's size
- The simplest way to prevent overfitting is to reduce the number of parameters; this is referred to as the model's capacity.
- Example: the original model vs. a lower-capacity model

```
# data import
from keras.datasets import imdb
import numpy as np

(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)

def vectorize_sequences(sequences, dimension=10000):
    # Create an all-zero matrix of shape (len(sequences), dimension)
    results = np.zeros((len(sequences), dimension))
    for i, sequence in enumerate(sequences):
        results[i, sequence] = 1.  # set specific indices of results[i] to 1
    return results

# Vectorize the training data
x_train = vectorize_sequences(train_data)
# Vectorize the test data
x_test = vectorize_sequences(test_data)

# Vectorize the labels
y_train = np.asarray(train_labels).astype('float32')
y_test = np.asarray(test_labels).astype('float32')

from keras import models
from keras import layers

original_model = models.Sequential()
original_model.add(layers.Dense(16, activation='relu', input_shape=(10000,)))
original_model.add(layers.Dense(16, activation='relu'))
original_model.add(layers.Dense(1, activation='sigmoid'))
original_model.compile(optimizer='rmsprop',
                       loss='binary_crossentropy',
                       metrics=['acc'])

smaller_model = models.Sequential()
smaller_model.add(layers.Dense(6, activation='relu', input_shape=(10000,)))
smaller_model.add(layers.Dense(6, activation='relu'))
smaller_model.add(layers.Dense(1, activation='sigmoid'))
smaller_model.compile(optimizer='rmsprop',
                      loss='binary_crossentropy',
                      metrics=['acc'])

original_hist = original_model.fit(x_train, y_train,
                                   epochs=20,
                                   batch_size=512,
                                   validation_data=(x_test, y_test))

smaller_model_hist = smaller_model.fit(x_train, y_train,
                                       epochs=20,
                                       batch_size=512,
                                       validation_data=(x_test, y_test))

epochs = range(1, 21)
original_val_loss = original_hist.history['val_loss']
smaller_model_val_loss = smaller_model_hist.history['val_loss']

import matplotlib.pyplot as plt
%matplotlib inline

# 'b+' means blue plus signs
plt.plot(epochs, original_val_loss, 'b+', label='Original model')
# 'bo' means blue dots
plt.plot(epochs, smaller_model_val_loss, 'bo', label='Smaller model')
plt.xlabel('Epochs')
plt.ylabel('Validation loss')
plt.legend()
plt.show()
```

- The smaller network starts to overfit later (at a higher epoch) than the original network.

```
# A network with much larger capacity
bigger_model = models.Sequential()
bigger_model.add(layers.Dense(1024, activation='relu', input_shape=(10000,)))
bigger_model.add(layers.Dense(1024, activation='relu'))
bigger_model.add(layers.Dense(1, activation='sigmoid'))
bigger_model.compile(optimizer='rmsprop',
                     loss='binary_crossentropy',
                     metrics=['acc'])

bigger_model_hist = bigger_model.fit(x_train, y_train,
                                     epochs=20,
                                     batch_size=512,
                                     validation_data=(x_test, y_test))

bigger_model_val_loss = bigger_model_hist.history['val_loss']

plt.plot(epochs, original_val_loss, 'b+', label='Original model')
plt.plot(epochs, bigger_model_val_loss, 'bo', label='Bigger model')
plt.xlabel('Epochs')
plt.ylabel('Validation loss')
plt.legend()
plt.show()
```

- The bigger model starts to overfit sooner.

```
original_train_loss = original_hist.history['loss']
bigger_model_train_loss = bigger_model_hist.history['loss']

plt.plot(epochs, original_train_loss, 'b+', label='Original model')
plt.plot(epochs, bigger_model_train_loss, 'bo', label='Bigger model')
plt.xlabel('Epochs')
plt.ylabel('Training loss')
plt.legend()
plt.show()
```

- Comparing the loss on the training data: the bigger model's training loss drops toward 0 much faster (a sign of overfitting).

### Adding weight regularization
- Weight regularization: constraining the complexity of the network by forcing its weights to take only small values, which makes the distribution of weight values more regular.
- L1 regularization: the cost added is proportional to the absolute value of the weights.
- L2 regularization: the cost added is proportional to the square of the weights; also called weight decay.

```
# Add weight regularization (L2) to the movie-review model
from keras import regularizers

l2_model = models.Sequential()
l2_model.add(layers.Dense(16, kernel_regularizer=regularizers.l2(0.001),
                          activation='relu', input_shape=(10000,)))
l2_model.add(layers.Dense(16, kernel_regularizer=regularizers.l2(0.001),
                          activation='relu'))
l2_model.add(layers.Dense(1, activation='sigmoid'))
l2_model.compile(optimizer='rmsprop',
                 loss='binary_crossentropy',
                 metrics=['acc'])

l2_model_hist = l2_model.fit(x_train, y_train,
                             epochs=20,
                             batch_size=512,
                             validation_data=(x_test, y_test))

l2_model_val_loss = l2_model_hist.history['val_loss']

plt.plot(epochs, original_val_loss, 'b+', label='Original model')
plt.plot(epochs, l2_model_val_loss, 'bo', label='L2-regularized model')
plt.xlabel('Epochs')
plt.ylabel('Validation loss')
plt.legend()
plt.show()
```

- The model with L2 regularization resists overfitting much better than the original.

```
# Other weight regularizers
from keras import regularizers

regularizers.l1(0.001)  # L1 regularization
regularizers.l1_l2(l1=0.001, l2=0.001)  # simultaneous L1 and L2 regularization
```

### Adding dropout
- During training, randomly drop out (set to zero) a fraction of the layer's output features.
- Dropout rate: the fraction of the features that are zeroed out, usually between 0.2 and 0.5.

```
# dropout model
dpt_model = models.Sequential()
dpt_model.add(layers.Dense(16, activation='relu', input_shape=(10000,)))
dpt_model.add(layers.Dropout(0.5))
dpt_model.add(layers.Dense(16, activation='relu'))
dpt_model.add(layers.Dropout(0.5))
dpt_model.add(layers.Dense(1, activation='sigmoid'))
dpt_model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['acc'])

dpt_model_hist = dpt_model.fit(x_train, y_train,
                               epochs=20,
                               batch_size=512,
                               validation_data=(x_test, y_test))

dpt_model_val_loss = dpt_model_hist.history['val_loss']

plt.plot(epochs, original_val_loss, 'b+', label='Original model')
plt.plot(epochs, dpt_model_val_loss, 'bo', label='Dropout-regularized model')
plt.xlabel('Epochs')
plt.ylabel('Validation loss')
plt.legend()
plt.show()
```
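The chapter presents weight regularization and dropout separately; the two can also be combined in a single model. The sketch below is not from the book: it reuses the same 16-16-1 architecture and the hyperparameter values shown above purely for illustration.

```
from keras import models, layers, regularizers

# A variant of the original 16-16-1 network that applies both L2 weight
# regularization and dropout. Hyperparameters are illustrative only.
combined_model = models.Sequential()
combined_model.add(layers.Dense(16, kernel_regularizer=regularizers.l2(0.001),
                                activation='relu', input_shape=(10000,)))
combined_model.add(layers.Dropout(0.5))
combined_model.add(layers.Dense(16, kernel_regularizer=regularizers.l2(0.001),
                                activation='relu'))
combined_model.add(layers.Dropout(0.5))
combined_model.add(layers.Dense(1, activation='sigmoid'))
combined_model.compile(optimizer='rmsprop',
                       loss='binary_crossentropy',
                       metrics=['acc'])

combined_model.summary()
```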
# Predicting Boston Housing Prices ## Using XGBoost in SageMaker (Deploy) _Deep Learning Nanodegree Program | Deployment_ --- As an introduction to using SageMaker's High Level Python API we will look at a relatively simple problem. Namely, we will use the [Boston Housing Dataset](https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html) to predict the median value of a home in the area of Boston Mass. The documentation for the high level API can be found on the [ReadTheDocs page](http://sagemaker.readthedocs.io/en/latest/) ## General Outline Typically, when using a notebook instance with SageMaker, you will proceed through the following steps. Of course, not every step will need to be done with each project. Also, there is quite a lot of room for variation in many of the steps, as you will see throughout these lessons. 1. Download or otherwise retrieve the data. 2. Process / Prepare the data. 3. Upload the processed data to S3. 4. Train a chosen model. 5. Test the trained model (typically using a batch transform job). 6. Deploy the trained model. 7. Use the deployed model. In this notebook we will be skipping step 5, testing the model. We will still test the model but we will do so by first deploying the model and then sending the test data to the deployed model. ## Step 0: Setting up the notebook We begin by setting up all of the necessary bits required to run our notebook. To start that means loading all of the Python modules we will need. ``` %matplotlib inline import os import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.datasets import load_boston import sklearn.model_selection ``` In addition to the modules above, we need to import the various bits of SageMaker that we will be using. ``` import sagemaker from sagemaker import get_execution_role from sagemaker.amazon.amazon_estimator import get_image_uri from sagemaker.predictor import csv_serializer # This is an object that represents the SageMaker session that we are currently operating in. This # object contains some useful information that we will need to access later such as our region. session = sagemaker.Session() # This is an object that represents the IAM role that we are currently assigned. When we construct # and launch the training job later we will need to tell it what IAM role it should have. Since our # use case is relatively simple we will simply assign the training job the role we currently have. role = get_execution_role() ``` ## Step 1: Downloading the data Fortunately, this dataset can be retrieved using sklearn and so this step is relatively straightforward. ``` boston = load_boston() ``` ## Step 2: Preparing and splitting the data Given that this is clean tabular data, we don't need to do any processing. However, we do need to split the rows in the dataset up into train, test and validation sets. ``` # First we package up the input data and the target variable (the median value) as pandas dataframes. This # will make saving the data to a file a little easier later on. X_bos_pd = pd.DataFrame(boston.data, columns=boston.feature_names) Y_bos_pd = pd.DataFrame(boston.target) # We split the dataset into 2/3 training and 1/3 testing sets. X_train, X_test, Y_train, Y_test = sklearn.model_selection.train_test_split(X_bos_pd, Y_bos_pd, test_size=0.33) # Then we split the training set further into 2/3 training and 1/3 validation sets. 
X_train, X_val, Y_train, Y_val = sklearn.model_selection.train_test_split(X_train, Y_train, test_size=0.33) ``` ## Step 3: Uploading the training and validation files to S3 When a training job is constructed using SageMaker, a container is executed which performs the training operation. This container is given access to data that is stored in S3. This means that we need to upload the data we want to use for training to S3. We can use the SageMaker API to do this and hide some of the details. ### Save the data locally First we need to create the train and validation csv files which we will then upload to S3. ``` # This is our local data directory. We need to make sure that it exists. data_dir = '../data/boston' if not os.path.exists(data_dir): os.makedirs(data_dir) # We use pandas to save our train and validation data to csv files. Note that we make sure not to include header # information or an index as this is required by the built in algorithms provided by Amazon. Also, it is assumed # that the first entry in each row is the target variable. pd.concat([Y_val, X_val], axis=1).to_csv(os.path.join(data_dir, 'validation.csv'), header=False, index=False) pd.concat([Y_train, X_train], axis=1).to_csv(os.path.join(data_dir, 'train.csv'), header=False, index=False) ``` ### Upload to S3 Since we are currently running inside of a SageMaker session, we can use the object which represents this session to upload our data to the 'default' S3 bucket. Note that it is good practice to provide a custom prefix (essentially an S3 folder) to make sure that you don't accidentally interfere with data uploaded from some other notebook or project. ``` prefix = 'boston-xgboost-deploy-hl' val_location = session.upload_data(os.path.join(data_dir, 'validation.csv'), key_prefix=prefix) train_location = session.upload_data(os.path.join(data_dir, 'train.csv'), key_prefix=prefix) ``` ## Step 4: Train the XGBoost model Now that we have the training and validation data uploaded to S3, we can construct our XGBoost model and train it. We will be making use of the high level SageMaker API to do this which will make the resulting code a little easier to read at the cost of some flexibility. To construct an estimator, the object which we wish to train, we need to provide the location of a container which contains the training code. Since we are using a built in algorithm this container is provided by Amazon. However, the full name of the container is a bit lengthy and depends on the region that we are operating in. Fortunately, SageMaker provides a useful utility method called `get_image_uri` that constructs the image name for us. To use the `get_image_uri` method we need to provide it with our current region, which can be obtained from the session object, and the name of the algorithm we wish to use. In this notebook we will be using XGBoost however you could try another algorithm if you wish. The list of built in algorithms can be found in the list of [Common Parameters](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-algo-docker-registry-paths.html). ``` # As stated above, we use this utility method to construct the image name for the training container. container = get_image_uri(session.boto_region_name, 'xgboost') # Now that we know which container to use, we can construct the estimator object. 
xgb = sagemaker.estimator.Estimator(container, # The name of the training container
                                    role,      # The IAM role to use (our current role in this case)
                                    train_instance_count=1, # The number of instances to use for training
                                    train_instance_type='ml.m4.xlarge', # The type of instance to use for training
                                    output_path='s3://{}/{}/output'.format(session.default_bucket(), prefix),
                                                                        # Where to save the output (the model artifacts)
                                    sagemaker_session=session) # The current SageMaker session
```

Before asking SageMaker to begin the training job, we should probably set any model-specific hyperparameters. There are quite a few that can be set when using the XGBoost algorithm; below are just a few of them. If you would like to change the hyperparameters below or modify additional ones you can find additional information on the [XGBoost hyperparameter page](https://docs.aws.amazon.com/sagemaker/latest/dg/xgboost_hyperparameters.html)

```
xgb.set_hyperparameters(max_depth=5,
                        eta=0.2,
                        gamma=4,
                        min_child_weight=6,
                        subsample=0.8,
                        objective='reg:linear',
                        early_stopping_rounds=10,
                        num_round=200)
```

Now that we have our estimator object completely set up, it is time to train it. To do this we make sure that SageMaker knows our input data is in csv format and then execute the `fit` method.

```
# This is a wrapper around the location of our train and validation data, to make sure that SageMaker
# knows our data is in csv format.
s3_input_train = sagemaker.s3_input(s3_data=train_location, content_type='csv')
s3_input_validation = sagemaker.s3_input(s3_data=val_location, content_type='csv')

xgb.fit({'train': s3_input_train, 'validation': s3_input_validation})
```

## Step 5: Test the trained model

We will be skipping this step for now. We will still test our trained model but we are going to do it by using the deployed model, rather than setting up a batch transform job.

## Step 6: Deploy the trained model

Now that we have fit our model to the training data, using the validation data to avoid overfitting, we can deploy our model and test it. Deploying is very simple when we use the high level API: we need only call the `deploy` method of our trained estimator.

**NOTE:** When deploying a model you are asking SageMaker to launch a compute instance that will wait for data to be sent to it. As a result, this compute instance will continue to run until *you* shut it down. This is important to know since the cost of a deployed endpoint depends on how long it has been running for. In other words, **if you are no longer using a deployed endpoint, shut it down!**

```
xgb_predictor = xgb.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge')
```

## Step 7: Use the model

Now that our model is trained and deployed we can send the test data to it and evaluate the results. Here, because our test data is so small, we can send it all using a single call to our endpoint. If our test dataset was larger we would need to split it up and send the data in chunks, making sure to accumulate the results.

```
# We need to tell the endpoint what format the data we are sending is in
xgb_predictor.content_type = 'text/csv'
xgb_predictor.serializer = csv_serializer

Y_pred = xgb_predictor.predict(X_test.values).decode('utf-8')

# predictions is currently a comma delimited string and so we would like to break it up
# as a numpy array.
Y_pred = np.fromstring(Y_pred, sep=',')
```

To see how well our model works we can create a simple scatter plot between the predicted and actual values.
If the model was completely accurate the resulting scatter plot would look like the line $x=y$. As we can see, our model seems to have done okay but there is room for improvement. ``` plt.scatter(Y_test, Y_pred) plt.xlabel("Median Price") plt.ylabel("Predicted Price") plt.title("Median Price vs Predicted Price") ``` ## Delete the endpoint Since we are no longer using the deployed model we need to make sure to shut it down. Remember that you have to pay for the length of time that your endpoint is deployed so the longer it is left running, the more it costs. ``` xgb_predictor.delete_endpoint() ``` ## Optional: Clean up The default notebook instance on SageMaker doesn't have a lot of excess disk space available. As you continue to complete and execute notebooks you will eventually fill up this disk space, leading to errors which can be difficult to diagnose. Once you are completely finished using a notebook it is a good idea to remove the files that you created along the way. Of course, you can do this from the terminal or from the notebook hub if you would like. The cell below contains some commands to clean up the created files from within the notebook. ``` # First we will remove all of the files contained in the data_dir directory !rm $data_dir/* # And then we delete the directory itself !rmdir $data_dir ```
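As an aside (not part of the original walkthrough): since `Y_test` and `Y_pred` from Step 7 are ordinary in-memory objects, a quick quantitative error metric can still be computed even after the endpoint has been deleted, complementing the scatter plot above. The following is only a sketch of one possible sanity check.

```
# A sketch of a quick quantitative check on the predictions from Step 7 (our addition).
# Y_test is a pandas DataFrame and Y_pred a numpy array, both already in memory.
import numpy as np

errors = Y_test.values.flatten() - Y_pred
rmse = np.sqrt(np.mean(errors ** 2))
mae = np.mean(np.abs(errors))
print("RMSE: {:.2f}, MAE: {:.2f}".format(rmse, mae))
```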
``` import numpy as np import sys import tensorflow as tf import matplotlib.pyplot as plt %matplotlib inline from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) inputs_ = tf.placeholder(tf.float32,[None,28,28,1]) targets_ = tf.placeholder(tf.float32,[None,28,28,1]) def lrelu(x,alpha=0.1): return tf.maximum(alpha*x,x) ### Encoder with tf.name_scope('en-convolutions'): conv1 = tf.layers.conv2d(inputs_,filters=32,kernel_size=(3,3),strides=(1,1),padding='SAME',use_bias=True,activation=lrelu,name='conv1') # Now 28x28x32 with tf.name_scope('en-pooling'): maxpool1 = tf.layers.max_pooling2d(conv1,pool_size=(2,2),strides=(2,2),name='pool1') # Now 14x14x32 with tf.name_scope('en-convolutions'): conv2 = tf.layers.conv2d(maxpool1,filters=32,kernel_size=(3,3),strides=(1,1),padding='SAME',use_bias=True,activation=lrelu,name='conv2') # Now 14x14x32 with tf.name_scope('encoding'): encoded = tf.layers.max_pooling2d(conv2,pool_size=(2,2),strides=(2,2),name='encoding') # Now 7x7x32. #latent space ### Decoder with tf.name_scope('decoder'): conv3 = tf.layers.conv2d(encoded,filters=32,kernel_size=(3,3),strides=(1,1),name='conv3',padding='SAME',use_bias=True,activation=lrelu) #Now 7x7x32 upsample1 = tf.layers.conv2d_transpose(conv3,filters=32,kernel_size=3,padding='same',strides=2,name='upsample1') # Now 14x14x32 upsample2 = tf.layers.conv2d_transpose(upsample1,filters=32,kernel_size=3,padding='same',strides=2,name='upsample2') # Now 28x28x32 logits = tf.layers.conv2d(upsample2,filters=1,kernel_size=(3,3),strides=(1,1),name='logits',padding='SAME',use_bias=True) #Now 28x28x1 # Pass logits through sigmoid to get reconstructed image decoded = tf.sigmoid(logits,name='recon') loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,labels=targets_) learning_rate=tf.placeholder(tf.float32) cost = tf.reduce_mean(loss) #cost opt = tf.train.AdamOptimizer(learning_rate).minimize(cost) #optimizer # Training sess = tf.Session() #tf.reset_default_graph() saver = tf.train.Saver() loss = [] valid_loss = [] display_step = 1 epochs = 25 batch_size = 64 #lr=[1e-3/(2**(i//5))for i in range(epochs)] lr=1e-5 sess.run(tf.global_variables_initializer()) writer = tf.summary.FileWriter('./graphs', sess.graph) for e in range(epochs): total_batch = int(mnist.train.num_examples/batch_size) for ibatch in range(total_batch): batch_x = mnist.train.next_batch(batch_size) batch_test_x= mnist.test.next_batch(batch_size) imgs_test = batch_x[0].reshape((-1, 28, 28, 1)) noise_factor = 0.5 x_test_noisy = imgs_test + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=imgs_test.shape) x_test_noisy = np.clip(x_test_noisy, 0., 1.) imgs = batch_x[0].reshape((-1, 28, 28, 1)) x_train_noisy = imgs + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=imgs.shape) x_train_noisy = np.clip(x_train_noisy, 0., 1.) 
batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: x_train_noisy, targets_: imgs,learning_rate:lr}) batch_cost_test = sess.run(cost, feed_dict={inputs_: x_test_noisy, targets_: imgs_test}) if (e+1) % display_step == 0: print("Epoch: {}/{}...".format(e+1, epochs), "Training loss: {:.4f}".format(batch_cost), "Validation loss: {:.4f}".format(batch_cost_test)) loss.append(batch_cost) valid_loss.append(batch_cost_test) plt.plot(range(e+1), loss, 'bo', label='Training loss') plt.plot(range(e+1), valid_loss, 'r', label='Validation loss') plt.title('Training and validation loss') plt.xlabel('Epochs ',fontsize=16) plt.ylabel('Loss',fontsize=16) plt.legend() plt.figure() plt.show() saver.save(sess, 'encode_model') batch_x= mnist.test.next_batch(10) imgs = batch_x[0].reshape((-1, 28, 28, 1)) noise_factor = 0.5 x_test_noisy = imgs + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=imgs.shape) x_test_noisy = np.clip(x_test_noisy, 0., 1.) recon_img = sess.run([decoded], feed_dict={inputs_: x_test_noisy})[0] plt.figure(figsize=(20, 4)) plt.title('Reconstructed Images') print("Original Images") for i in range(10): plt.subplot(2, 10, i+1) plt.imshow(imgs[i, ..., 0], cmap='gray') plt.show() plt.figure(figsize=(20, 4)) print("Noisy Images") for i in range(10): plt.subplot(2, 10, i+1) plt.imshow(x_test_noisy[i, ..., 0], cmap='gray') plt.show() plt.figure(figsize=(20, 4)) print("Reconstruction of Noisy Images") for i in range(10): plt.subplot(2, 10, i+1) plt.imshow(recon_img[i, ..., 0], cmap='gray') plt.show() writer.close() sess.close() ```
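The training loop above saves a checkpoint with `saver.save(sess, 'encode_model')` but never shows how to use it again. Below is a minimal sketch (our addition, not from the original notebook) of restoring that checkpoint and denoising a few fresh test images; it assumes the graph built above is still defined in the current Python process and that the `encode_model` checkpoint files are in the working directory.

```
# Sketch (not in the original notebook): reload the saved weights and run the denoiser once more.
import numpy as np
import tensorflow as tf

with tf.Session() as restored_sess:
    saver.restore(restored_sess, 'encode_model')   # load the trained variables back into the graph
    batch = mnist.test.next_batch(5)[0].reshape((-1, 28, 28, 1))
    noisy = np.clip(batch + 0.5 * np.random.normal(size=batch.shape), 0., 1.)
    denoised = restored_sess.run(decoded, feed_dict={inputs_: noisy})
    print(denoised.shape)   # expected: (5, 28, 28, 1)
```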
In this notebook, we will go over several Python examples for detecting anomalies (outliers) from data. Anomaly detection is the task of identifying instances whose characteristics differ significantly from the rest of the data. In this tutorial, we will provide examples of applying different anomaly detection techniques using Python and its library packages.

The first approach is model-based: it assumes that the majority of the data instances are governed by some well-known probability distribution, e.g., a Binomial or Gaussian distribution. Anomalies can then be detected by looking for observations that do not fit the overall distribution of the data. In this example, our goal is to detect anomalous changes in the daily closing prices of various stocks. The input data *stocks.csv* contains the historical closing prices of stocks for 3 large corporations (Microsoft, Ford Motor Company, and Bank of America).

```
import pandas as pd

stocks = pd.read_csv('data/stocks.csv', header='infer')
stocks.index = stocks['Date']
stocks = stocks.drop(['Date'], axis=1)
stocks.head()
```

We can compute the percentage of change in the daily closing price of each stock as follows:
\begin{equation}
\Delta(t) = 100 \times \frac{x_t - x_{t-1}}{x_{t-1}}
\end{equation}
where $x_t$ denotes the price of a stock on day $t$ and $x_{t-1}$ denotes the price on its previous day, $t-1$.

```
import numpy as np

N, d = stocks.shape
delta = pd.DataFrame(100*np.divide(stocks.iloc[1:,:].values - stocks.iloc[:N-1,:].values,
                                   stocks.iloc[:N-1,:].values),
                     columns=stocks.columns, index=stocks.iloc[1:].index)
delta.head()
```

We can plot the distribution of the percentage daily changes in stock price.

```
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
%matplotlib inline

fig = plt.figure(figsize=(8,5)).gca(projection='3d')
fig.scatter(delta.MSFT, delta.F, delta.BAC)
fig.set_xlabel('Microsoft')
fig.set_ylabel('Ford')
fig.set_zlabel('Bank of America')
plt.show()
```

Assuming the data follows a multivariate Gaussian distribution, we can compute the mean and covariance matrix of the 3-dimensional data as follows:

```
meanValue = delta.mean()
covValue = delta.cov()
print(meanValue)
print(covValue)
```

To determine the anomalous trading days, we can compute the Mahalanobis distance between the percentage of price change on each day and the mean percentage of price change:
\begin{equation}
\textrm{Mahalanobis}(x) = (x - \bar{x}) \Sigma^{-1}(x - \bar{x})^T
\end{equation}
where $x$ is assumed to be a row vector. See Equation 9.4 in Section 9.3.1 for more information about using Mahalanobis distance for detecting anomalies in a multivariate Gaussian distribution.

```
from numpy.linalg import inv

X = delta.values.copy()     # .as_matrix() is deprecated in recent pandas; copy so centering X leaves delta untouched
S = covValue.values
S_inv = inv(S)              # the formula above uses the *inverse* covariance matrix
for i in range(3):
    X[:,i] = X[:,i] - meanValue[i]

def mahalanobis(row):
    return np.matmul(row, S_inv).dot(row)

anomaly_score = np.apply_along_axis(mahalanobis, axis=1, arr=X)

fig = plt.figure(figsize=(10,6))
ax = fig.add_subplot(111, projection='3d')
p = ax.scatter(delta.MSFT, delta.F, delta.BAC, c=anomaly_score, cmap='jet')
ax.set_xlabel('Microsoft')
ax.set_ylabel('Ford')
ax.set_zlabel('Bank of America')
fig.colorbar(p)
plt.show()
```

The top-2 anomalies are shown as brown points in the figure above. The highest anomaly corresponds to the day on which the prices of all 3 stocks increase significantly, whereas the second highest anomaly corresponds to the day on which all 3 stocks suffer a large percentage drop in their closing prices. We can examine the dates associated with the top-2 highest anomaly scores as follows.
```
anom = pd.DataFrame(anomaly_score, index=delta.index, columns=['Anomaly score'])
result = pd.concat((delta, anom), axis=1)
result.nlargest(2, 'Anomaly score')
```

Note that the sharp drop in the stock prices on October 7, 2008 coincides with the beginning of the global financial crisis (https://en.wikipedia.org/wiki/Global_financial_crisis_in_October_2008), while the other top anomaly corresponds to the sharp increase in the prices of all 3 stocks on April 9, 2009.

```
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(15,6))

ts = delta[440:447]
ts.plot.line(ax=ax1)
ax1.set_xticks(range(7))
ax1.set_xticklabels(ts.index)
ax1.set_ylabel('Percent Change')

ts = delta[568:575]
ts.plot.line(ax=ax2)
ax2.set_xticks(range(7))
ax2.set_xticklabels(ts.index)
ax2.set_ylabel('Percent Change')
```

The distance-based approach is a model-free anomaly detection approach, as it does not require constructing an explicit model of the normal class to determine the anomaly score of data instances. The example code shown below employs the k-nearest neighbor approach to calculate the anomaly score. Specifically, a normal instance is expected to have a small distance to its k-th nearest neighbor, whereas an anomaly is likely to have a large distance to its k-th nearest neighbor. In the example below, we apply the distance-based approach with k=4 to identify the anomalous trading days from the stock market data described in the previous section.

```
from sklearn.neighbors import NearestNeighbors
import numpy as np
from scipy.spatial import distance

knn = 4
nbrs = NearestNeighbors(n_neighbors=knn, metric=distance.euclidean).fit(delta.values)   # .as_matrix() is deprecated
distances, indices = nbrs.kneighbors(delta.values)

anomaly_score = distances[:, knn-1]

fig = plt.figure(figsize=(10,6))
ax = fig.add_subplot(111, projection='3d')
p = ax.scatter(delta.MSFT, delta.F, delta.BAC, c=anomaly_score, cmap='jet')
ax.set_xlabel('Microsoft')
ax.set_ylabel('Ford')
ax.set_zlabel('Bank of America')
fig.colorbar(p)
plt.show()
```

The results are slightly different from those shown in Section 9.1, since we have used Euclidean distance (instead of Mahalanobis distance) to detect the anomalies. We can examine the dates associated with the top-5 highest anomaly scores as follows.

```
anom = pd.DataFrame(anomaly_score, index=delta.index, columns=['Anomaly score'])
result = pd.concat((delta, anom), axis=1)
result.nlargest(5, 'Anomaly score')

fig = plt.figure(figsize=(10,4))
ax = fig.add_subplot(111)
ts = delta[445:452]
ts.plot.line(ax=ax)
ax.set_xticks(range(7))
ax.set_xticklabels(ts.index)
ax.set_ylabel('Percent Change')
```
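The note above points out that the k-nearest-neighbor results differ from the Mahalanobis-based ones partly because plain Euclidean distance ignores the very different scales and correlations of the three series. A common middle ground, sketched below (our addition, not from the original notebook), is to standardize each column before running the same k-nearest-neighbor procedure; this removes the scale differences, although, unlike the Mahalanobis distance, it still ignores correlations between the stocks.

```
# Sketch (our addition): standardize the percentage changes before the k-nearest-neighbor search,
# so that no single stock dominates the Euclidean distance.
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import NearestNeighbors

scaled = StandardScaler().fit_transform(delta.values)

knn = 4
nbrs = NearestNeighbors(n_neighbors=knn).fit(scaled)   # default metric is Euclidean
distances, indices = nbrs.kneighbors(scaled)
anomaly_score_scaled = distances[:, knn-1]

anom = pd.DataFrame(anomaly_score_scaled, index=delta.index, columns=['Anomaly score'])
pd.concat((delta, anom), axis=1).nlargest(5, 'Anomaly score')
```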
# Chapter 7: Data Cleaning and Preparation ## 7.0. ``` import numpy as np import pandas as pd PREVIOUS_MAX_ROWS = pd.options.display.max_rows pd.options.display.max_rows = 20 np.random.seed(12345) import matplotlib.pyplot as plt plt.rc('figure', figsize=(10, 6)) np.set_printoptions(precision=4, suppress=True) ``` ## 7.1. Handling Missing Data ``` string_data = pd.Series(['aardvark', 'artichoke', np.nan, 'avocado']) string_data string_data.isnull() ``` In pandas, we’ve *adopted a convention used in the R programming language* by `referring to missing data as NA`, which stands for not available. In statistics applications, NA data may either be data that does not exist or that exists but was not observed (through problems with data collection, for example). *When cleaning up data for analysis, it is often important to do analysis on the missing data itself to identify data collection problems or potential biases in the data caused by missing data.* <br> The built-in Python `None` value `is also treated as NA` in object arrays: ``` string_data string_data[0] = None string_data.isnull() ``` ### 7.1.1. Filtering Out `Missing Data` There are a few ways to filter out missing data.<br> While you always have the option to do it by hand using pandas.isnull and boolean indexing, `the dropna` can be helpful. On a Series, *it returns the Series with only the nonnull data and index values*: ``` from numpy import nan as NA data = pd.Series([1, NA, 3.5, NA, 7]) data.dropna() #*it returns the Series with only the nonnull data and index values* ``` This is equivalent to: ``` data[data.notnull()] ``` With *DataFrame* objects, things are a bit more complex. You may want to drop rows or columns that are all NA or only those containing any NAs. `dropna` *by default drops any row containing a missing value*: ``` data = pd.DataFrame([[1., 6.5, 3.], [1., NA, NA], [NA, NA, NA], [NA, 6.5, 3.]]) cleaned = data.dropna() data cleaned ``` * Passing `how='all'` will only *drop rows* that are *all NA*: ``` data.dropna(how='all') #@P: only row 2 -> all N/A value will be dropped ``` To *drop columns in the same way* (only drop column that all N/A ), pass `axis=1`: ``` data[4] = NA data data.dropna(axis=1, how='all') ``` A related way to filter out DataFrame rows tends to concern time series data. *Suppose you want to keep only rows containing a certain number of observations*. You can indicate this with the `thresh` argument: ``` df = pd.DataFrame(np.random.randn(7, 3)) df.iloc[:4, 1] = NA df.iloc[:2, 2] = NA df df.dropna() df.dropna(thresh=2) #@P: set the threshold that row contain 2 n/a value will be dropped ``` ### 7.1.2 Filling In Missing Data Rather than filtering out missing data (and potentially discarding other data along with it), you may want to fill in the “holes” in any number of ways. 
For most purposes, the fillna method is the workhorse function to use.<br> Calling `fillna` with a constant replaces missing values with that value: ``` df.fillna(0) ``` Calling `fillna` with `a dict`, you can use a *different fill value* for `each column`: ``` df.fillna({1: 0.5, 2: 0}) ``` * *fillna returns a new object*, but you can modify the existing object `inplace` ``` _ = df.fillna(0, inplace=True) df ``` The same `interpolation methods` available for reindexing can be used with fillna: ``` df = pd.DataFrame(np.random.randn(6, 3)) df.iloc[2:, 1] = NA df.iloc[4:, 2] = NA df df.fillna(method='ffill') #@P note: ffill = forward fill df.fillna(method='ffill', limit=2) #@P note: ffill = forward fill but limit only 2 fill value ahead -> example in column index 1 ``` With fillna you can do lots of other things with a little creativity. For example, `you might pass the mean or median value of a Series`: ``` data = pd.Series([1., NA, 3.5, NA, 7]) data.fillna(data.mean()) #@P: fill the na with the mean of the series ``` ## 7.2. Data Transformation ### 7.2.1.Removing Duplicates Duplicate rows may be found in a DataFrame for any number of reasons. Here is an example: ``` data = pd.DataFrame({'k1': ['one', 'two'] * 3 + ['two'], 'k2': [1, 1, 2, 3, 3, 4, 4]}) data ``` The DataFrame method `duplicated` returns *a boolean Series indicating whether each row is a duplicate* (has been observed in a previous row) or not: ``` data.duplicated() ``` Relatedly, `drop_duplicates` returns a DataFrame where the duplicated array is False: ``` data.drop_duplicates() ``` `Both of these methods by default consider all of the columns`; alternatively, `you can specify any subset of them to detect duplicates`. <br> Suppose we had an additional column of values and wanted to filter duplicates only based on the 'k1' column: ``` data['v1'] = range(7) data data.drop_duplicates(['k1']) #@P: chỉ dựa trên value của column k1 để define duplicate -> các dòng index 2 -6 đều define là duplicate ->dropped ``` `duplicated` and `drop_duplicates` by default keep the first observed value combination. Passing `keep='last'` will return the last one: ``` data.drop_duplicates(['k1', 'k2'], keep='last') ``` ### 7.2.2. Transforming Data Using a Function or Mapping For many datasets, you may wish to *perform some transformation based on the values in an array, Series, or column in a DataFrame*. Consider the following hypothetical data collected about various kinds of meat: ``` data = pd.DataFrame({'food': ['bacon', 'pulled pork', 'bacon', 'Pastrami', 'corned beef', 'Bacon', 'pastrami', 'honey ham', 'nova lox'], 'ounces': [4, 3, 12, 6, 7.5, 8, 3, 5, 6]}) data ``` Suppose you wanted to add a column indicating the type of animal that each food came from. Let’s write down a mapping of each distinct meat type to the kind of animal: ``` meat_to_animal = { 'bacon': 'pig', 'pulled pork': 'pig', 'pastrami': 'cow', 'corned beef': 'cow', 'honey ham': 'pig', 'nova lox': 'salmon' } ``` The map method on a Series accepts a function or dict-like object containing a mapping, but here *we have a small problem in that some of the meats are capitalized and others are not*. 
Thus, we need to convert each value to lowercase using the `str.lower` Series method: ``` lowercased = data['food'].str.lower() lowercased data['animal'] = lowercased.map(meat_to_animal) data ``` We could also have passed a function that does all the work: ``` data['food'].map(lambda x: meat_to_animal[x.lower()]) #@P: this is a much more concise syntax ``` Using `map` is a convenient way to perform element-wise transformations and other data cleaning–related operations. ### 7.2.3. Replacing Values Filling in missing data with the fillna method is a special case of more general value replacement. As you’ve already seen, map can be used to modify a subset of values in an object but `replace` *provides a simpler and more flexible way* to do so.<br> Let’s consider this Series: ``` data = pd.Series([1., -999., 2., -999., -1000., 3.]) data ``` The -999 values might be sentinel values for missing data. To replace these with NA values that pandas understands, we can use `replace`, producing a new Series (unless you pass *inplace=True*): ``` data.replace(-999, np.nan) ``` If you want to *replace multiple values at once*, you instead pass a list and then the substitute value: ``` data.replace([-999, -1000], np.nan) ``` To *use a different replacement for each value*, pass a list of substitutes: ``` data.replace([-999, -1000], [np.nan, 0]) ``` The argument passed can also be a dict: ``` data.replace({-999: np.nan, -1000: 0}) #@P: this also do the replacement but much better syntax for longer list to keeptrack the pair of replacement value ``` **NOTE** <br> The `data.replace` method *is distinct* from `data.str.replace`, which *performs string substitution element-wise* (@P: which here should refer to str.replace, which is nearer modify noun). We look at these string methods on Series later in the chapter. ### 7.2.4. Renaming Axis Indexes Like values in a Series, axis labels can be similarly transformed by a function or mapping of some form to produce new, differently labeled objects. You *can also modify the axes in-place* without creating a new data structure. <br> Here’s a simple example: ``` data = pd.DataFrame(np.arange(12).reshape((3, 4)), index=['Ohio', 'Colorado', 'New York'], columns=['one', 'two', 'three', 'four']) data ``` Like a Series, the axis indexes have a `map` method: ``` transform = lambda x: x[:4].upper() data.index.map(transform) ``` You can assign to index, modifying the DataFrame in-place: ``` data.index = data.index.map(transform) data ``` If you want to create a transformed version of a dataset without modifying the original, a useful method is `rename`: ``` data.rename(index=str.title, columns=str.upper) ``` Notably, `rename` can be used in conjunction with a dict-like object providing new values for a subset of the axis labels: ``` data.rename(index={'OHIO': 'INDIANA'}, columns={'three': 'peekaboo'}) ``` `rename` saves you from the chore of *copying the DataFrame manually and assigning* to its *index and columns attributes*. Should you wish to modify a dataset in-place, pass inplace=True: ``` data.rename(index={'OHIO': 'INDIANA'}, inplace=True) data ``` ### 7.2.5. Discretization and Binning Continuous data is often discretized or otherwise separated into “bins” for analysis. <br> Suppose you have data about a group of people in a study, and you want to group them into discrete age buckets: ``` ages = [20, 22, 25, 27, 21, 23, 37, 31, 61, 45, 41, 32] ``` Let’s divide these into bins of 18 to 25, 26 to 35, 36 to 60, and finally 61 and older. 
To do so, you have to use `cut`, a function in pandas: ``` bins = [18, 25, 35, 60, 100] cats = pd.cut(ages, bins) cats ``` The object pandas returns is a special `Categorical object`. The output you see describes the bins computed by `pandas.cut`. You can treat it like an array of strings indicating the bin name; internally it contains a categories array specifying the distinct category names along with a labeling for the ages data in the `codes` attribute: ``` cats.codes #@P: this attribute map the category index of the value -> quick check that only 1 person 61 age -> group 3 (last group) cats.categories ``` Note that `pd.value_counts(cats)` are the *bin counts for the result* of `pandas.cut`. ``` pd.value_counts(cats) ``` Consistent with mathematical notation for intervals, `a parenthesis means that the side is open`, while the `square bracket means it is closed` (inclusive). <br> You can change which side is closed by passing right=False: ``` pd.cut(ages, [18, 26, 36, 61, 100], right=False) ``` You can also *pass your own bin names* by passing a list or array to the `labels` option: ``` group_names = ['Youth', 'YoungAdult', 'MiddleAged', 'Senior'] pd.cut(ages, bins, labels=group_names) ``` If you `pass an integer number of bins to cut` instead of explicit bin edges, `it will compute equal-length bins based on the minimum and maximum values in the data`. Consider the case of some uniformly distributed data chopped into fourths: ``` data = np.random.rand(20) pd.cut(data, 4, precision=2) #The precision=2 option limits the decimal precision to two digits. ``` A closely related function, `qcut`, *bins the data based on sample quantiles*. Depending on the distribution of the data, using cut will not usually result in each bin having the same number of data points. Since qcut uses sample quantiles instead, by definition you will obtain roughly equal-size bins ``` data = np.random.randn(1000) # Normally distributed cats = pd.qcut(data, 4) # Cut into quartiles cats pd.value_counts(cats) ``` Similar to cut you can pass your own quantiles (numbers between 0 and 1, inclusive): ``` pd.qcut(data, [0, 0.1, 0.5, 0.9, 1.]) #@P: pass my own quantiles to cut into bins ``` We’ll return to cut and qcut later in the chapter during our discussion of aggregation and group operations, as `these discretization functions are especially useful for quantile and group analysis`. ### 7.2.6. Detecting and Filtering Outliers Filtering or transforming outliers is largely a matter of applying array operations.<br> Consider a DataFrame with some normally distributed data: ``` data = pd.DataFrame(np.random.randn(1000, 4)) data.describe() ``` Suppose you wanted to find values in one of the columns exceeding 3 in absolute value: ``` col = data[2] col[np.abs(col) > 3] ``` To select all rows having a value exceeding 3 or –3, you can use the any method on a boolean DataFrame: ``` data[(np.abs(data) > 3).any(1)] ``` Values can be set based on these criteria. Here is code to *cap values outside the interval –3 to 3*: ``` data[np.abs(data) > 3] = np.sign(data) * 3 data.describe() ``` The statement `np.sign(data)` produces 1 and –1 values based on whether the `values` in data are `positive or negative`: ``` np.sign(data).head() ``` ### 7.2.7. Permutation and Random Sampling Permuting (*randomly reordering*) a Series or the rows in a DataFrame is easy to do using the `numpy.random.permutation` function. 
Calling *permutation* with the length of the axis you want to *permute produces an array of integers indicating the new ordering*: ``` df = pd.DataFrame(np.arange(5 * 4).reshape((5, 4))) sampler = np.random.permutation(5) sampler ``` That array can then be used in `iloc`-*based indexing* or the equivalent take function: ``` df df.take(sampler) ``` To select a random subset without replacement, you can use the `sample` method on Series and DataFrame: ``` df.sample(n=3) ``` To `generate a sample with replacement` (to allow repeat choices), pass *replace=True* to sample: ``` choices = pd.Series([5, 7, -1, 6, 4]) draws = choices.sample(n=10, replace=True) #@P: to have 10 samples from the the series components above draws ``` ### 7.2.8. Computing Indicator/Dummy Variables Another type of transformation for statistical modeling or machine learning applications is `converting a categorical variable into a “dummy” or “indicator” matrix`. I*f a column in a DataFrame has k distinct values, you would derive a matrix or DataFrame with k columns containing all 1s and 0s*. pandas has a get_dummies function for doing this, though devising one yourself is not difficult.<br> Let’s return to an earlier example DataFrame: ``` df = pd.DataFrame({'key': ['b', 'b', 'a', 'c', 'a', 'b'], 'data1': range(6)}) df pd.get_dummies(df['key']) ``` In some cases, you may want to add a prefix to the columns in the indicator DataFrame, which can then be merged with the other data. `get_dummies` has a prefix argument for doing this: ``` dummies = pd.get_dummies(df['key'], prefix='key') df_with_dummy = df[['data1']].join(dummies) df_with_dummy #@P: key_a = 1 mean the key = a; similar for key_b and key_c ``` If a row in a DataFrame belongs to multiple categories, things are a bit more complicated. Let’s look at the MovieLens 1M dataset, which is investigated in more detail in Chapter 14: ``` mnames = ['movie_id', 'title', 'genres'] movies = pd.read_table('datasets/movielens/movies.dat', sep='::', # header=None, names=mnames) header=None, names=mnames, encoding='ISO-8859-1') #@P: P research stackoverflow to fix encoding problem movies[:10] ``` Adding indicator variables for each genre requires a little bit of wrangling.<br> First, we extract the list of unique genres in the dataset: ``` all_genres = [] for x in movies.genres: all_genres.extend(x.split('|')) genres = pd.unique(all_genres) genres ``` One way to construct the indicator DataFrame is to start with a DataFrame of all zeros: ``` zero_matrix = np.zeros((len(movies), len(genres))) dummies = pd.DataFrame(zero_matrix, columns=genres) ``` Now, iterate through each movie and set entries in each row of dummies to 1. To do this, we use the dummies.columns to compute the column indices for each genre: ``` gen = movies.genres[0] gen.split('|') dummies.columns.get_indexer(gen.split('|')) ``` Then, we can use `.iloc` to set values based on these indices: ``` for i, gen in enumerate(movies.genres): indices = dummies.columns.get_indexer(gen.split('|')) dummies.iloc[i, indices] = 1 ``` Then, as before, you can combine this with movies: ``` movies_windic = movies.join(dummies.add_prefix('Genre_')) movies_windic.iloc[0] ``` **NOTE** For much larger data, this method of constructing indicator variables with multiple membership is not especially speedy. It would be better to write a lower-level function that writes directly to a NumPy array, and then wrap the result in a DataFrame. 
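Following up on the note above, here is one possible sketch of such a lower-level helper that fills a NumPy array directly and only wraps it in a DataFrame at the end; the function name `fast_dummies` is ours, not from the book.

```
# A possible lower-level implementation of the multi-membership indicator matrix
# (our sketch; the book only suggests the idea). Works on a Series of 'A|B|C'-style strings.
import numpy as np
import pandas as pd

def fast_dummies(labels, sep='|'):
    split_rows = [s.split(sep) for s in labels]
    columns = pd.unique([g for row in split_rows for g in row])
    col_index = {c: i for i, c in enumerate(columns)}
    out = np.zeros((len(split_rows), len(columns)), dtype=np.uint8)
    for i, row in enumerate(split_rows):
        for g in row:
            out[i, col_index[g]] = 1
    return pd.DataFrame(out, columns=columns, index=labels.index)

# e.g. movies.join(fast_dummies(movies.genres).add_prefix('Genre_'))
```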
A useful recipe for statistical applications is to combine `get_dummies` with a discretization function like `cut`: ``` np.random.seed(12345) values = np.random.rand(10) values bins = [0, 0.2, 0.4, 0.6, 0.8, 1] pd.get_dummies(pd.cut(values, bins)) #@P: vietsub result: index ) -> value 0.9296 -> mark as 1 in group (0.8,1.0], #similarly for index 1 -> value 0.3164 -> belong to group (0.2,0.4] ``` We set the random seed with `numpy.random.seed` to make the example deterministic. We will look again at `pandas.get_dummies` later in the book. ## 7.3. String Manipulation ### 7.3.1. String Object Methods In many string munging and scripting applications, built-in string methods are sufficient.<br> As an example, a comma-separated string can be broken into pieces with `split`: ``` val = 'a,b, guido' val.split(',') ``` `split` is often combined with `strip` to trim whitespace (including line breaks): ``` pieces = [x.strip() for x in val.split(',')] pieces type(pieces) ``` These substrings could be concatenated together with a two-colon delimiter using addition: ``` first, second, third = pieces #@P: asign value first, second, third to each element of the list above first + '::' + second + '::' + third ``` But this isn’t a practical generic method. *A faster and more Pythonic way* is to pass a list or tuple to the `join` method on the string '::': ``` '::'.join(pieces) ``` Other methods are concerned with locating substrings. Using Python’s `in` keyword *is the best way to detect a substring*, though *index* and *find* *can also be used*: ``` val 'guido' in val val.index(',') val.find(':') ``` Note the difference between `find` and `index` is that `index raises an exception if the string isn’t found` (versus returning –1): ``` val.index(':') ``` Relatedly, `count` *returns the number of occurrences* of a particular substring: ``` val.count(',') ``` `replace` will substitute occurrences of one pattern for another. `It is commonly used to delete patterns, too`, *by passing an empty string*: ``` val.replace(',', '::') #@P: replace , with :: mark in string val.replace(',', '') #@P: replace , with empty == delete , mark in string ``` ### 7.3.2. Regular Expressions The `re` module functions fall into three categories:<br> * pattern matching * substitution * splitting <br> Naturally these are all related; a regex describes a pattern to locate in the text, which can then be used for many purposes.<br> Let’s look at a simple example: suppose we wanted to split a string with a variable number of whitespace characters (tabs, spaces, and newlines). The regex describing one or more whitespace characters is \s+: ``` import re text = "foo bar\t baz \tqux" text re.split('\s+', text) ``` When you call *re.split('\s+', text)*, the regular expression is first compiled, and then its split method is called on the passed text.<br><br> You can compile the regex yourself with `re.compile`, forming a reusable regex object: ``` regex = re.compile('\s+') regex.split(text) ``` If, instead, you wanted to get a list of all patterns matching the regex, you can use the `findall` method: ``` regex.findall(text) ``` **NOTE** To avoid unwanted escaping with \ in a regular expression, use `raw` string literals like `r'C:\x'` instead of the equivalent 'C:\\x'. Creating a regex object with `re.compile` is highly recommended if you intend to apply the same expression to many strings; doing so will save CPU cycles. `match` and `search` are closely related to `findall`. 
While `findall` *returns all matches in a string*, *search* returns only the *first match*. More rigidly, match only matches at the beginning of the string.<br> As a less trivial example, let’s consider a block of text and a regular expression capable of identifying most email addresses: ``` re.compile?? text = """Dave [email protected] Steve [email protected] Rob [email protected] Ryan [email protected] """ pattern = r'[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,4}' #@P vietsub: letter+number @letter+number . 'lettera-z' 2-4 # re.IGNORECASE makes the regex case-insensitive regex = re.compile(pattern, flags=re.IGNORECASE) ``` Using `findall` on the text produces a list of the email addresses: ``` regex.findall(text) ``` `search` returns a special match object for the *first email address in the text*. For the preceding regex, the match object can only tell us the start and end position of the pattern in the string: ``` m = regex.search(text) m text[m.start():m.end()] ``` `regex.match` returns `None`, as it only will match if the pattern occurs *at the start of the string*: ``` print(regex.match(text)) ``` Relatedly, sub will return a new string with occurrences of the pattern replaced by the a new string: ``` print(regex.sub('REDACTED', text)) ``` Suppose you wanted to find email addresses and simultaneously *segment each address into* its three components: `username`, `domain name`, and `domain suffix`<br> To do this, `put parentheses around the parts of the pattern to segment`: ``` pattern = r'([A-Z0-9._%+-]+)@([A-Z0-9.-]+)\.([A-Z]{2,4})' regex = re.compile(pattern, flags=re.IGNORECASE) ``` A match object produced by this modified regex (@P elaborate: using parentheses as above) returns a tuple of the pattern components with its `groups` method: ``` m = regex.match('[email protected]') m.groups() ``` findall returns a list of tuples when the pattern has groups: ``` regex.findall(text) ``` * `sub` also has `access to groups in each match` using special symbols like `\1` and `\2`. The symbol \1 corresponds to the first matched group, \2 corresponds to the second, and so forth: ``` print(regex.sub(r'Username: \1, Domain: \2, Suffix: \3', text)) ``` There is much more to regular expressions in Python, most of which is outside the book’s scope. Table 7-4 provides a brief summary. ### 7.3.3. Vectorized String Functions in pandas Cleaning up a messy dataset for analysis often requires a lot of string munging and regularization. To complicate matters, a column containing strings will sometimes have missing data: ``` data = {'Dave': '[email protected]', 'Steve': '[email protected]', 'Rob': '[email protected]', 'Wes': np.nan} data = pd.Series(data) data data.isnull() ``` You can apply string and regular expression methods can be applied (passing a lambda or other function) to each value using data.map, but it will fail on the NA (null) values.<br> To cope with this, Series has array-oriented methods for string operations that skip NA values. These are accessed through Series’s str attribute;<br> for example, *we could check whether each email address has 'gmail' in it* with `str.contains`: ``` data.str.contains('gmail') ``` Regular expressions can be used, too, along with any `re` options like IGNORECASE: ``` pattern data.str.findall(pattern, flags=re.IGNORECASE) ``` There are a couple of ways to do vectorized element retrieval. 
Either use `str.get` or index into the `str` attribute:

```
matches = data.str.match(pattern, flags=re.IGNORECASE)
matches
```

To access elements in the embedded lists, we can pass an index to either of these functions. (Note: in recent pandas versions `str.match` returns a boolean Series rather than the matched groups, which is why the calls below originally failed; recovering the groups with `str.findall` first makes them work.)

```
#@P20210830: matches.str.get(1) / matches.str[0] raised
# "Can only use .str accessor with string values!" because `matches` is boolean in recent pandas.
# Workaround: extract the match groups with findall, then index into them.
matches = data.str.findall(pattern, flags=re.IGNORECASE).str[0]
matches.str.get(1)
matches.str[0]
```

You can similarly slice strings using this syntax:

```
data.str[:5]
pd.options.display.max_rows = PREVIOUS_MAX_ROWS
```

## 7.4. Conclusion
<h1 align="center">PROGRAMACIÓN DE COMPUTADORES </h1> <h2 align="center">UNIVERSIDAD EAFIT</h2> <h3 align="center">MEDELLÍN - COLOMBIA </h3> <h2 align="center">Sesión 06 - Excepciones</h2> ### Errores y Excepciones Una excepción es un error que ocurre durante la ejecución de un programa. - Las excepciones son conocidas por los no programadores como instancias que no se ajustan a una regla general. - El nombre "excepción" en informática también tiene este significado: Implica que el problema (la excepción) no ocurre con frecuencia, es decir, la excepción es la "excepción a la regla". - El manejo de excepciones es una construcción en algunos lenguajes de programación para manejar o tratar errores automáticamente. - Muchos lenguajes de programación como C ++, Objective-C, PHP, Java, Ruby, Python y muchos otros tienen soporte incorporado para el manejo de excepciones. El tratamiento de errores generalmente se resuelve al guardar el estado de ejecución en el momento en que se produjo el error e interrumpir el flujo normal del programa para ejecutar una función o código especial, que se conoce como manejador de excepciones. - Dependiendo del tipo de error ("división por cero", "error abierto de archivo", etc.), el manejador de errores puede "arreglar" el problema y el programa puede continuar después con los datos previamente guardados. ### Manejo de Excepciones El manejo de excepciones en *Python* es muy similar a *Java*. - El código, que alberga el riesgo de una excepción, está incrustado en un bloque `try`. - Pero mientras que en Java las excepciones son capturadas por cláusulas catch, tenemos declaraciones introducidas por una palabra clave `except` en Python. - Es posible crear excepciones "hechas a medida": con la instrucción raise es posible forzar una excepción especificada a ocurrir. Veamos un ejemplo sencillo. - Asumiendo que se quiere pedir al usuario que introduzca un número entero. - Si se usa una entrada (), la entrada será una cadena, que hay que convertir en un entero. - Si la entrada no ha sido un entero válido, se generarrá un `ValueError`. ``` n = int(input("Ingrese un Numero entero: ")) import sys try: number = int(input("Entre un número entero")) except ValueError: print("Err.. Solo se aceptan números enteros") sys.exit() print ("Su número fue ", number) ``` Otros ejemplos de excepciones... ``` 0./0. 4 + spam * 3 "2" / 2 ``` Con la ayuda de manejo de excepciones, se puede escribir un código robusto para leer un entero de entrada: ``` while True: try: n = int(input("Ingrese un entero: ")) break except ValueError: print("No es un entero válido! intente otra vez...") print("Genial, has ingresado un entero!") ``` Es un ciclo, que se rompe sólo, si se ha dado un entero válido. El script funciona de la siguiente manera: - Se introduce el ciclo `while`. - El código dentro de la cláusula `try` se ejecutará instrucción por instrucción. - Si no se produce ninguna excepción durante la ejecución, la ejecución alcanzará la sentencia `break` y se dejará el ciclo `while`. - Si se produce una excepción, es decir, en la conversión de $n$, se omitirá el resto del bloque `try` y se ejecutará la cláusula `except`. - El error elevado, en nuestro caso un *ValueError*, tiene que coincidir con uno de los nombres después de `except`. - En nuestro ejemplo sólo uno, es decir, `ValueError:`. - Después de imprimir el texto de la sentencia de impresión, la ejecución realiza otro ciclo. - Comienza con una nueva entrada (). 
### Multiple Exception Clauses

A `try` statement can have more than one `except` clause for different exceptions, but at most one `except` clause will be executed.

The following example shows a `try` clause in which we open a file for reading, read a line from this file and convert this line into an integer.

There are at least two possible exceptions:

- `IOError`
- `ValueError`

Just in case, we also include an additional unnamed `except` clause for unexpected errors:

```
f = open('enteroshgk.txt')

import sys

try:
    f = open('enteros.txt')
    s = f.readline()
    i = int(s.strip())
except IOError as e:
    errno, strerror = e.args
    print("I/O error({0}): {1}".format(errno,strerror))
    # e puede imprimirse directamente sin usar .args:
    # print(e)
except ValueError:
    print("No hay un número entero válido en la línea.")
except:
    print("Error inesperado:", sys.exc_info()[0])
    raise
```

The handling of the `IOError` in the example above is of special interest.

- The `except` clause for `IOError` specifies a variable "*e*" after the exception name (`IOError`). The variable "*e*" is bound to an exception instance with the arguments stored in `instance.args`.

If we call the above script with a non-existent file, we will receive a message like:

    I/O error(2): No such file or directory

And if the file *enteros.txt* is not readable, i.e. if we do not have permission to read it, we will get a message like:

    I/O error(13): Permission denied

An `except` clause can name more than one exception in a tuple of error names:

```
try:
    f = open('enteros.txt')
    s = f.readline()
    i = int(s.strip())
except (IOError, ValueError):
    print("Un error I/O o ValueError ha ocurrido")
except:
    print("Un error inesperado ha occurrido")
    raise
```
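Earlier it was noted that `raise` can force a specified exception to occur, and the bare `raise` in the blocks above simply re-raises the exception currently being handled. As a minimal illustrative sketch (hypothetical names, my own addition rather than part of the original material), this is how one might raise an exception with a message and define a custom exception class:

```
class NegativeNumberError(ValueError):
    """Hypothetical custom exception for this example."""
    pass

def read_positive_integer(text):
    n = int(text)                    # may itself raise ValueError
    if n < 0:
        # Force our own exception with an explanatory message.
        raise NegativeNumberError("expected a positive integer, got %d" % n)
    return n

try:
    read_positive_integer("-3")
except NegativeNumberError as e:
    print("Caught:", e)
```

Because `NegativeNumberError` inherits from `ValueError`, an `except ValueError:` clause would catch it as well.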
# Psytrack Examples References to the paper ***Efficient inference for time-varying behavior during learning*** published in NeurIPS 2018 Link to paper: http://pillowlab.princeton.edu/pubs/Roy18_NeurIPS_dynamicPsychophys.pdf --- ``` %matplotlib inline %load_ext autoreload %autoreload 2 import numpy as np from matplotlib import pyplot as plt ``` # Quick Start This is a < 5 min intro to Psytrack through the use of a simulated dataset. Please find a much more in-depth tutorial with real data below. ### Generate simulated data ``` from psytrack.runSim import generateSim K = 4 # number of weights to simulate N = 10000 # number of trials to simulate seed = 41 # random seed to reproduce generated data simData = generateSim(K, N, seed=seed) print("sigma^2 associated with each weight: 2^" + str(np.log2(simData['sigma']))) print("number of beahvioral realizations (y's) generated and stored: ", len(simData['all_Y'])) print("weights generated:") plt.figure(figsize=(10,5)) plt.plot(simData['W']) plt.xlabel("Trial #"); plt.ylabel("Weight"); ``` ### Recover weights from simulated behavior ``` from psytrack.runSim import recoverSim rec = recoverSim(simData) print("Processing time:", rec['duration']) # Display recovered weights overlaid on real weights plt.figure(figsize=(10,5)) plt.plot(simData['W'], linestyle="-", alpha=0.5) plt.plot(rec['wMode'].T, linestyle="--", lw=3, color="black") plt.xlabel("Trial #"); plt.ylabel("Weight"); # Display recovered sigmas overlaid on real sigmas plt.figure(figsize=(3,6)) plt.plot(np.arange(K)+1, np.log2(simData['sigma']), 'bo', label="Real Sigma") plt.plot(np.arange(K)+1, np.log2(rec['hyp']['sigma']), 'r+', label="Recovered Sigma") plt.xlabel(r"$\sigma^2_i$", fontsize=16) plt.ylabel(r"$\log_2(\sigma^2)$", fontsize=16) plt.legend() ``` **Note:** To recreate figures like Figures 2a-c in paper, use a single set of true weights $\bf{w}$ and different samples of $X \sim \mathcal{N}(0,1)$ --- # Real Data Datasets handled by Psytrack are specific to an individual animal and are stored as a Python dictionary ``` # Extract premade dataset from npz D = np.load('sampleRatData.npz')['D'].item() print("The keys of the dict for this example animal:\n ", list(D.keys())) ``` Of these keys, only `y` and `inputs` are necessary for analysis of the dataset --- `y` should be a 1D array of the animal's choice on each trial. Currently, the analysis only works for two-alternative forced choice tasks, and so there should only be two options on each trial (error or omission trials are typically discarded from the analysis). The two options (A or B, Left or Right, etc.) **must** be mapped to 1 and 2 in `y` (not 0 and 1, or -1 and +1). In this example, Left=1 and Right=2. ``` print("The shape of y: ", D['y'].shape) print("The number of trials: N =", D['y'].shape[0]) print("The unique entries of y: ", np.unique(D['y'])) ``` --- `inputs` is itself another dictionary, containing arbitrary keys. Each of these keys represents a _potential_ input into the model and must be a 2D array of shape $(N, M)$ where $N$ is the number of trials. The number of columns $M$ is arbitrary, and the $i^{th}$ column is typically used to encode information from $i$ time steps previous. For example, in our example data set the key `s1` encodes the (normalized) stimulus values heard on each trial. `s1[7,0]` would encode the stimulus heard on the 7th trial where as both `s1[6,0]` and `s1[7,1]` would encode the stimulus heard on the 6th trial. 
The information is redundant, but allows for all feasible regressors to predicting behavior of trial $i$ to be accessible by referencing the $i^{th}$ row of the respective input array. ``` print("The keys of inputs:\n ", list(D['inputs'].keys())) print("\nThe shape of s1:", D['inputs']['s1'].shape) print("s1[7] : ", D['inputs']['s1'][7]) print("s1[6,0] : ", D['inputs']['s1'][6,0]) print("s1[7,1] : ", D['inputs']['s1'][7,1]) ``` --- Other keys are for convenience: `name` stores the name of the animal, `answer` is an easy reference as to what the correct choice was on a given trial (can be inferred from `inputs`), and `correct` is an easy reference as to whether the animal made the correct choice on a given trial (inferred from `inputs` and `y`). `dayLength` is an array storing the number of trials that occurred in each session of training. Taking a cumulative sum will give you the indices at which each new session begins. This can be useful for post-hoc analysis, but is necessary for the analysis if one wishes to use the `sigmaDay` functionality (see Section 3.3 in paper). --- ## Fitting the data Once you have your data arranged in the proper format, you can now run the analysis! The fitting function is called `hyperOpt()` and before using it, you must decide on 3 inputs: 1) `weights` : which of your inputs should you fit 2) `hyper` : what hyperparameters should your model have and how should they be initialized 3) `optList` : what subset of the hyperparameters should be optimized `weights` is a dictionary where the keys correspond to the keys in your dataset's `inputs` dictionary, and the key values are an integer indicating how many of the columns of that value in `inputs` ought to be used for fitting. You can also include in `weights` the special key `bias` which need not be included in `inputs` --- this will simply create an input of all 1s. ``` from psytrack.hyperOpt import hyperOpt weights = {'bias' : 1, # a special key 's1' : 1, # use only the first column of s1 from inputs 's2' : 1} # use only the first column of s2 from inputs # It is often useful to have the total number of weights K in your model K = np.sum([weights[i] for i in weights.keys()]) ``` `hyper` is a dictionary that indicates what hyperparameters your model will have. There are 3 types, the only necessary one being `sigma` which controls trial-to-trial variability. Optionally, you may also include `sigInit` which controls the variability on the very first trial (e.g. how close weights must initialize to 0) --- it is often best to include this hyperparameter and set it to a high value as you often prefer the data to determine where the weights ought to initialize. The final optional hyperparameter to include is `sigDay` which controls variability between sessions (that is, between the last trial of one session and the first trial of the next session) --- see Section 3.3 in the paper for more info on `sigDay`. For each type of hyperparameter included in `hyper`, you must also select an initial value. If you are optimizing over a particular hyperparameter, than the initial value is not so important as the fitting procedure will eventually converge regardless. However, if you are *not* optimizing, then the initial value will be the fixed value of the hyperparameter. Finally, for each hyperparameter key in `hyper`, you must specify your initializations as a 1D array with length $K$. 
If you provide only a single value, then the optimizer will assume that you want the same hyperparameter to apply to every weight (as opposed to each weight having it's own). ``` hyper= {'sigInit' : 2**4., 'sigma' : [2**-4.]*K, # Each weight will have it't own sigma, but all initialized the same 'sigDay' : None} # Not necessary to specify as None, but keeps things general ``` `optList` is a list of the subset of hyperparameters in `hyper` that you wish to optimize over in your model. It is typically unnecessary to optimize over `sigInit` though you will often wish to provide initial values. ``` optList = ['sigma'] ``` Now that we have specified `weights`, `hyper`, and `optList`, we can fit our model with `hyperOpt()`! The function takes your dataset `D` and the three additional inputs, and returns 4 things. 1) `hyp` : a dictionary of the optimized hyperparameters 2) `evd` : the approximate model evidence of the optimized model 3) `wMode` : the weights of the optimized model 4) `hess` : a dictionary of sparse terms that relate to the Hessian of the optimal model, and can be used to calculate posterior credible intervals on the weights Run times will depend on the number of trials $N$ and weights $K$ as well as the number of hyperparameters being fit. Refer to Figure 2 in the paper for a rough idea of how long things ought to take. This ought to take < 2 minutes! ``` hyp, evd, wMode, hess = hyperOpt(D, hyper, weights, optList) # Let's quickly plot the results! plt.figure(figsize=(10,5)) plt.plot(wMode.T) plt.xlabel("Trial #"); plt.ylabel("Weight"); ``` Fortunately, Psytrack includes a much nicer plotting function for visualizing the model ``` from psytrack.plot.analysisFunctions import makeWeightPlot makeWeightPlot(wMode, D, weights, END=10000) ``` The labels, color scheme, and many other plotting details are hard-coded into `makeWeightPlot()` and so will likely need some adjustment to fit to your specific dataset, but it offers a blueprint and a variety of handy features. For example, adding credible intervals on the weights can be done, but requires an extra bit of processing on the `hess` returned by `hyperOpt()` ``` from psytrack.aux.invBlkTriDiag import getCredibleInterval credibleInt = getCredibleInterval(hess) # Replot with 95% posterior credible interval makeWeightPlot(wMode, D, weights, END=10000, errorbar=credibleInt) ``` We can also add two subplots with useful information: 1) A performance plot, tracking the animals task accuracy over time 2) A bias plot, tracking the animal's choice bias over time Both of these plots will calculate their respective values directly from the data ("empirical") with 2SD error bars, but also overlay the corresponding values as predicted by the model weights ("predicted"). ``` makeWeightPlot(wMode, D, weights, END=10000, errorbar=credibleInt, perf_plot=True, bias_plot=True) ``` The predictions in the above plot simply use the weight trajectories found by the model which used every trial for fitting. If you wished to make true predictions, you would need to make predictions on trials that were held-out from the fitting procedure. We can do this using the built-in cross-validation functions `Kfold_crossVal()` and `Kfold_crossVal_check()`. This is also useful if you'd like to compare different models via cross-validated log-likelihood rather than approximate model evidence. 
`Kfold_crossVal()` receives a dataset and a specified number of cross-validation folds `F` and returns `F` smaller *training* datasets each with a corresponding *test* dataset that contain a random `1/F` of trials from the original dataset. Every trial in the dataset will now be a held-out trial in one of the `F` test sets. ``` from psytrack.aux.crossValidation import Kfold_crossVal folds = 10 # number of cross-validation folds trainDs, testDs = Kfold_crossVal(D, F=folds) ``` Now that the original dataset has been split into `F` pairs of training and testing datasets, we can fit each of the `F` training sets normally. Then using the weights recovered from each training set, we can *infer* the weights on the held-out trials with a simple interpolation between the nearest training trials. This is done in `Kfold_crossVal_check()` which returns the interpolated $\bf{g} \cdot \bf{w}$ for each test trial and the corresponding log-likelihood. **Note:** This is a computationally expensive procedure due to the fitting of `F` distinct datasets. Expect the cell below to run for 15-20 minutes! ``` from psytrack.aux.crossValidation import Kfold_crossVal_check test_results = [] for k in range(folds): print("Running xval fold", k+1) _, _, wMode_K, _ = hyperOpt(trainDs[k], hyper, weights, optList) logli, gw = Kfold_crossVal_check(testDs[k], wMode_K, trainDs[k]['missing_trials'], weights) res = {'logli' : np.sum(logli), 'gw' : gw, 'test_inds' : testDs[k]['test_inds']} test_results += [res] print("Cross-validated log-likelihood of model:", np.sum([i['logli'] for i in test_results])) makeWeightPlot(wMode, D, weights, END=10000, errorbar=credibleInt, perf_plot=True, bias_plot=True, prediction=test_results) ``` --- ### Other useful things The `trim()` function can be used to easily slice a dataset with a new start and end trial The `jacHessCheck()` function can provide a numerical check on the analytical jacobian and hessian calculated by the model
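Finally, to make the `inputs` convention from the Real Data section concrete, here is a minimal sketch (my own illustration using only NumPy; the variable names and stimulus values are hypothetical) of how one might build an $(N, M)$ input array whose $i^{th}$ column holds the stimulus from $i$ trials back, matching the `s1` layout described above:

```
import numpy as np

N, M = 1000, 3                      # number of trials, number of history columns
stim = np.random.randn(N)           # stand-in for per-trial (normalized) stimulus values

s1 = np.zeros((N, M))
for lag in range(M):
    # column `lag` holds the stimulus heard `lag` trials earlier
    s1[lag:, lag] = stim[:N - lag]

inputs = {'s1': s1}                 # same shape convention as D['inputs']['s1']
```

Rows before trial `lag` are simply left at zero here; how those early trials are padded is a modeling choice.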
# Pokedex This notebook is used to collect the full pokedex dataset from Gen I to Gen VIII ``` import pandas as pd import requests from bs4 import BeautifulSoup ``` We will use bulbapedia.bulbagarden.net to collect the name of all the pokemon. ``` url = "https://bulbapedia.bulbagarden.net/wiki/List_of_Pokémon_by_National_Pokédex_number" r = requests.get(url) soup = BeautifulSoup(r.text, "html.parser") rows = soup.find_all("tr") df = pd.DataFrame() for row in rows: if len(row.find_all("th")) > 1: headers = [header.text.replace('\n','').replace(' ','') for header in row.find_all("th")] elif len(row.find_all("td")) > 1: record = {} for i in range(len(row.find_all("td"))): record[headers[i]] = row.find_all("td")[i].text.replace('\n','').replace(' ','') df = df.append(pd.Series(record), ignore_index=True) cols = ['Ndex', 'MS'] df = df[cols] df.drop_duplicates(inplace=True) pokemon_names = df['MS'].to_list() pokemon_names = [item.lower() for item in pokemon_names] print(pokemon_names[0:5]) ``` Now we will use the primary data source of Serebii.net using their Gen VIII pokedex ``` url = "https://www.serebii.net/pokedex-swsh/{}/#stats" def get_number(soup): table = soup.find_all("table", class_="dextable")[1] num = table.find_all("tr")[1].find_all("td", class_="fooinfo")[2].text.strip().split('\n')[0].split("#")[-1] return num def get_name(soup): table = soup.find_all("table", class_="dextable")[1] name = table.find_all("tr")[1].find_all("td")[0].text return name def get_altnames(soup): altnames = {} table = soup.find_all("table", class_="dextable")[1] rows = table.find_all("tr")[1].find_all("td")[1].find_all("tr") for row in rows: key = row.find_all("td")[0].text.strip().replace(":","") contents = row.find_all("td")[1].contents value = [x for x in contents if isinstance(x, type(contents[0]))] altnames[key] = value return altnames def get_type(soup): types = [] table = soup.find_all("table", class_="dextable")[1] imgs = table.find_all("img") for img in imgs: types.append(img["src"].split("/")[-1].split(".")[0]) return types def get_gender(soup): gender_ratios = {} table = soup.find_all("table", class_="dextable")[1] try: #genderless pokemon will cause exception rows = table.find_all("tr")[1].find_all("td", class_="fooinfo")[3].contents[0].find_all("tr") for row in rows: key = row.find_all("td")[0].text.split(" ")[0] value = row.find_all("td")[1].text gender_ratios[key] = value except: pass return gender_ratios def get_classification(soup): table = soup.find_all("table", class_="dextable")[1] classification = table.find_all("td", class_="fooinfo")[4].text return classification def get_height(soup): table = soup.find_all("table", class_="dextable")[1] height = table.find_all("td", class_="fooinfo")[5].text.split("\t")[-1] return height def get_weight(soup): table = soup.find_all("table", class_="dextable")[1] weight = table.find_all("td", class_="fooinfo")[6].text.split("\t")[-1] return weight def get_capture_rate(soup): table = soup.find_all("table", class_="dextable")[1] rate = table.find_all("td", class_="fooinfo")[7].text.split("\t")[-1] return rate def get_base_egg_steps(soup): table = soup.find_all("table", class_="dextable")[1] steps = table.find_all("td", class_="fooinfo")[8].text.split("\t")[-1].replace(",","") return steps def get_base_stats(soup): base_stats = {} stat_names = ["HP","Attack","Defense","Sp. Attack","Sp. 
Defense","Speed"] stats_indices = [] for row in soup.find_all("tr"): if 'Stats' == row.text.replace('\n',''): stats_indices.append(soup.find_all("tr").index(row)) try: columns = soup.find_all("tr")[stats_indices[0]+2].find_all("td") except: columns = soup.find_all("tr")[stats_indices[1]+2].find_all("td") for i in range(len(stat_names)): base_stats[stat_names[i]] = columns[i+1].text return base_stats def get_legendary_status(name): legendary_status = {"sublegendary": 0, "legendary": 0, "mythical": 0} if name in status_dict["sublegendary"]: legendary_status["sublegendary"] = 1 elif name in status_dict["legendary"]: legendary_status["legendary"] = 1 elif name in status_dict["mythical"]: legendary_status["mythical"] = 1 return legendary_status def get_experience_growth(soup): table = soup.find_all("table", class_="dextable")[2].find_all("tr", recursive=False)[3] exp = table.find_all("tr", recursive=False)[3].td.contents[0].split(" ")[0].replace(",","") return exp def get_base_happiness(soup): table = soup.find_all("table", class_="dextable")[2].find_all("tr", recursive=False)[3] happiness = table.find_all("tr", recursive=False)[3].find_all("td",recursive=False)[1].text return happiness def get_against(soup): headers = [] against_dict = {} links = soup.find_all("table", class_="dextable")[3].find_all("tr",recursive=False)[1].find_all("a") for link in links: header = link['href'].split("/")[-1].split(".")[0] headers.append(header) columns = soup.find_all("table", class_="dextable")[3].find_all("tr",recursive=False)[2].find_all('td') for i in range(len(headers)): against_dict[headers[i]] = columns[i].text.split('*')[-1] return against_dict def get_abilities(soup): table = soup.find_all("table", class_="dextable")[2] abilities = table.find_all('tr')[0].text.split(": ")[1].strip().split(" - ") return abilities def get_gen(number): if int(number) <= 151: gen = 'I' elif int(number) <= 251: gen = 'II' elif int(number) <= 386: gen = 'III' elif int(number) <= 493: gen = 'IV' elif int(number) <= 649: gen = 'V' elif int(number) <= 721: gen = 'VI' elif int(number) <= 809: gen = 'VII' else: gen = 'VIII' return gen def get_description(name): name = name.replace("'",'').replace(' ','-').replace('.',' ').strip().replace(' ','-').replace(':','-') if name == 'nidoran♀': name = 'nidoran-female' elif name == 'nidoran♂': name = 'nidoran-male' elif name == 'mimejr': name = 'mime-jr' elif name[:4] == 'tapu': name = name[:4] + '-' + name[4:] url = 'https://www.pokemon.com/us/pokedex/{}' r = requests.get(url.format(name)) soup = BeautifulSoup(r.text, "html.parser") desc = soup.find('div', class_='version-descriptions active').find('p', class_='active').contents[0].strip() return desc def get_evochain(soup): table = soup.find_all("table", class_="evochain")[0] chain = [] if len(table.find_all('tr')) == 1: for img in table.find_all('img'): try: a = img['title'] chain.append(a) except: try: a = img['alt'] chain.append(a) except: pass elif table.find('a')['href'].split('/')[1] == 'pokedex-sm': row = table.find('tr') for col in row.find_all('td')[:7]: try: a = col.find('img')['title'].strip() except: try: a = col.find('img')['alt'].strip() except: number = col.find('a')['href'].split('/')[-1].split('.')[0] a = pokemon_names[int(number)-1].capitalize() chain.append(a) else: for col in table.find_all('td')[:7]: try: a = col.find('img')['title'].strip() except: try: a = col.find('img')['alt'].strip() except: try: number = col.find('img')['src'].split('/')[-1].split('.')[0] a = pokemon_names[int(number)-1].capitalize() except: 
pass #a = col.find('a')['href'].split('/')[-1].capitalize() chain.append(a) if len(chain)>1: if chain[0] == chain[1]: chain.pop(0) if len(chain)>3 and chain[2] == chain[3]: try: chain.pop(5) except: pass chain.pop(4) chain.pop(3) try: if chain[4] == chain[5]: chain.pop(6) chain.pop(5) except: pass return chain pokedex = pd.DataFrame() url = "https://www.serebii.net/pokemon/legendary.shtml" r = requests.get(url) soup = BeautifulSoup(r.text, "html.parser") tables = soup.find_all("table", class_="trainer") status_list = ["sublegendary","legendary","mythical"] status_dict = {} for i in range(len(tables)): status_dict[status_list[i]] = [] for row in tables[i].find_all("tr", recursive=False)[1:-1]: for column in row.find_all("td", recursive=False): item = column.find_all('td')[1].text status_dict[status_list[i]].append(item) for pokemon in pokemon_names: stats = {} try: url = "https://www.serebii.net/pokedex-swsh/{}/#stats" r = requests.get(url.format(pokemon)) soup = BeautifulSoup(r.text, "html.parser") check = soup.find_all("table", class_="dextable")[1] except: url = "https://www.serebii.net/pokedex-sm/{}.shtml" number = "{0:0=3d}".format(pokemon_names.index(pokemon)+1) r = requests.get(url.format(number)) soup = BeautifulSoup(r.text, "html.parser") stats['national_number'] = get_number(soup) stats['gen'] = get_gen(stats['national_number']) stats['english_name'] = get_name(soup) stats['japanese_name'] = get_altnames(soup)['Japan'][0] stats['primary_type'] = get_type(soup)[0] stats['secondary_type'] = get_type(soup)[1] if len(get_type(soup))>1 else None stats['percent_male'] = get_gender(soup)['Male'].replace('%','') if len(get_gender(soup))>1 else None stats['percent_female'] = get_gender(soup)['Female'].replace('%','') if len(get_gender(soup))>1 else None stats['classification'] = get_classification(soup) stats['height_m'] = get_height(soup).split('m')[0] stats['weight_kg'] = get_weight(soup).split('kg')[0] stats['capture_rate'] = get_capture_rate(soup) stats['base_egg_steps'] = get_base_egg_steps(soup) stats['hp'] = get_base_stats(soup)["HP"] stats['attack'] = get_base_stats(soup)["Attack"] stats['defense'] = get_base_stats(soup)["Defense"] stats['sp_attack'] = get_base_stats(soup)["Sp. Attack"] stats['sp_defense'] = get_base_stats(soup)["Sp. 
Defense"] stats['speed'] = get_base_stats(soup)["Speed"] stats['description'] = get_description(pokemon) abilities = get_abilities(soup) for i in range(3): try: if 'Hidden' not in abilities[i]: stats['abilities_{}'.format(i)] = abilities[i] elif 'Hidden' in abilities[i]: stats['abilities_hidden'] = abilities[i].split('(')[0].strip() except: stats['abilities_{}'.format(i)] = None for i in range(7): try: stats['evochain_{}'.format(i)] = get_evochain(soup)[i] except: stats['evochain_{}'.format(i)] = None legend = get_legendary_status(stats['english_name']) for i in legend.keys(): stats['is_{}'.format(i)] = legend[i] against = get_against(soup) for i in against.keys(): stats['against_{}'.format(i)] = against[i] pokedex = pokedex.append(pd.Series(stats), ignore_index=True) pokemon cols = [ 'national_number', 'gen', 'english_name', 'japanese_name', 'primary_type', 'secondary_type', 'classification', 'percent_male', 'percent_female', 'height_m', 'weight_kg', 'capture_rate', 'base_egg_steps', 'hp', 'attack', 'defense', 'sp_attack', 'sp_defense', 'speed' ] for i in range(3): cols.append('abilities_{}'.format(i)) cols.append('abilities_hidden') for i in against.keys(): cols.append('against_{}'.format(i)) for i in legend.keys(): cols.append('is_{}'.format(i)) for i in range(7): cols.append('evochain_{}'.format(i)) cols.append('description') pokedex[cols].to_csv('pokemon.csv', index=False, encoding='utf-16') ``` Alter data set for radar plot ``` df = pd.read_csv('pokemon.csv', encoding='utf-16') df = pd.melt(df, id_vars=['national_number','english_name'], value_vars=['hp', 'attack', 'defense', 'sp_attack', 'sp_defense','speed'], var_name='stat_name', value_name='stat_value') df.to_csv('pokemon_stats.csv', index=False, encoding='utf-16') ``` Obtain small images of all 898 pokemon ``` import urllib.request for i in range(898): url = "https://assets.pokemon.com/assets/cms2/img/pokedex/detail/{0:0=3d}.png".format(i+1) output = "{0:0=3d}.png".format(i+1) urllib.request.urlretrieve(url, output) ``` Obtain large images of all 898 pokemon ``` import os cwd = os.getcwd() for i in range(len(pokemon_names)): try: url = "https://bulbapedia.bulbagarden.net/wiki/File:{}.png".format("{0:0=3d}".format(i+1)+pokemon_names[i].replace("'","%27").replace(".","._").capitalize()) r = requests.get(url) soup = BeautifulSoup(r.text, "html.parser") div = soup.find("div", class_="fullImageLink") target = div.a['href'] output = "/images/{0:0=3d}.png".format(i+1) except: try: url = "https://bulbapedia.bulbagarden.net/wiki/File:{}.png".format("{0:0=3d}".format(i+1)+pokemon_names[i].replace("'","%27").replace(".","._").title()) r = requests.get(url) soup = BeautifulSoup(r.text, "html.parser") div = soup.find("div", class_="fullImageLink") target = div.a['href'] output = "/images/{0:0=3d}.png".format(i+1) except: if pokemon_names[i]=='mimejr.': name = 'Mime_Jr' url = "https://bulbapedia.bulbagarden.net/wiki/File:{}.png".format("{0:0=3d}".format(i+1)+name) r = requests.get(url) soup = BeautifulSoup(r.text, "html.parser") div = soup.find("div", class_="fullImageLink") target = div.a['href'] output = "/images/{0:0=3d}.png".format(i+1) else: name = "Giratina" url = "https://bulbapedia.bulbagarden.net/wiki/File:{}.png".format("{0:0=3d}".format(i+1)+name+"-Origin") r = requests.get(url) soup = BeautifulSoup(r.text, "html.parser") div = soup.find("div", class_="fullImageLink") target = div.a['href'] output = "/images/{0:0=3d}.png".format(i+1+485) try: urllib.request.urlretrieve(target, output) except: if target[0:4] != 'http': target = 
"https:" + target r = requests.get(target) with open(cwd+output, 'wb') as outfile: outfile.write(r.content) outfile.close() ``` Collect alternative images for pokemon with alt forms (e.g. regional variants, mega evolution, gigantamax) ``` url = 'https://bulbapedia.bulbagarden.net/wiki/Mega_Evolution' base = 'https://bulbapedia.bulbagarden.net' r = requests.get(url) soup = BeautifulSoup(r.text, "html.parser") import os cwd = os.getcwd() for i in soup.find_all('a', class_='image'): if '-Mega' in i['href']: url = base+i['href'] output = i['href'].split(':')[-1] r = requests.get(url) soup2 = BeautifulSoup(r.text, "html.parser") div = soup2.find("div", class_="fullImageLink") target = div.a['href'] if target[0:4] != 'http': target = "https:" + target try: urllib.request.urlretrieve(target, output) except: r = requests.get(target) with open(cwd+output, 'wb') as outfile: outfile.write(r.content) outfile.close() url = 'https://bulbapedia.bulbagarden.net/wiki/Primal_Reversion' import os cwd = os.getcwd() for i in soup.find_all('a', class_='image'): if '-Primal' in i['href']: url = base+i['href'] output = i['href'].split(':')[-1] r = requests.get(url) soup2 = BeautifulSoup(r.text, "html.parser") div = soup2.find("div", class_="fullImageLink") target = div.a['href'] if target[0:4] != 'http': target = "https:" + target try: urllib.request.urlretrieve(target, output) except: r = requests.get(target) with open(cwd+output, 'wb') as outfile: outfile.write(r.content) outfile.close() url = 'https://bulbapedia.bulbagarden.net/wiki/List_of_Pok%C3%A9mon_with_form_differences' r = requests.get(url) soup = BeautifulSoup(r.text, "html.parser") import os cwd = os.getcwd() for i in soup.find_all('a', class_='image'): if '-' in i['href'] and '.png' in i['href']: url = base+i['href'] output = i['href'].split(':')[-1] r = requests.get(url) soup2 = BeautifulSoup(r.text, "html.parser") div = soup2.find("div", class_="fullImageLink") target = div.a['href'] if target[0:4] != 'http': target = "https:" + target try: urllib.request.urlretrieve(target, output) except: r = requests.get(target) with open(cwd+output, 'wb') as outfile: outfile.write(r.content) outfile.close() url = 'https://bulbapedia.bulbagarden.net/wiki/Regional_form' r = requests.get(url) soup = BeautifulSoup(r.text, "html.parser") soup.find('div', class_='version-descriptions active').find('p', class_='active').contents[0].strip() import urllib.request for i in range(898): url = "https://assets.pokemon.com/assets/cms2/img/pokedex/detail/{0:0=3d}.png".format(i+1) output = "{0:0=3d}.png".format(i+1) urllib.request.urlretrieve(url, output) ```
# MCMC Basics

Here is a quick outline of how to use the MCMC tool in `henrietta`.

```
import henrietta as hsl
```

Before we can create a light curve model using an MCMC, we need to download some light curve data to work with.

```
lc = hsl.download_kepler_lc('Kepler-10', quarter=1)
```

Then, we need to define our custom BATMAN model, including which parameters we want to fix and which parameters we want to fit.

```
astropy_model = hsl.setup_transit_model()
```

Let's check out what the default model parameters are:

```
astropy_model
```

Kepler-10b has a period of 0.8375 days and an impact parameter of 0.3. Let's fix those values, but let the `radius`, `t0`, and `a` be free parameters, with ranges defined around the anticipated true value:

```
astropy_model = hsl.setup_transit_model(period = 0.8375, b = 0.3, t0 = [0.09,0.12], radius = [0.005,0.02], a = [1.0,4.0])
```

Now that we have our `astropy_model` object and our light curve data in the form of a `lightkurve` object, we are ready to model these parameters with a Markov-Chain Monte Carlo.

The `mcmc_fit` function takes the astropy model, the light curve data, and one additional argument that will determine whether or not output plots will be saved to the local directory. You might want to start with a small number of steps in the MCMC chain (say 100 or 1000) to see how long it takes, and then scale up to longer MCMC runs from there.

This `mcmc_fit` function returns two useful variables:

`max_likelihood` is a dictionary with keys equal to the names of the free parameters. This dictionary contains 3 values for each free parameter - the maximum likelihood value (determined as the median sampled parameter) and the upper and lower 1-sigma uncertainty parameter values.

`samples` is an object that contains many different tools for examining the MCMC results. For an in-depth look at the capabilities of this object, the user should consult the emcee handbook: http://dfm.io/emcee/current/

```
max_likelihood, samples = hsl.mcmc_fit(astropy_model,lc,saveplots=True, nsteps=1000)
```

If we open up our current directory, we should see some `.pdf` files that contain summaries of diagnostic plots made by this MCMC. We can also look at the `max_likelihood` variable now, to see the central 1-sigma ranges for each of our fitted parameters.

```
max_likelihood
```

There's *lots* to learn about MCMC fitting of transits, but this tool might be a handy way to get started!
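As a purely illustrative sketch (my own addition; it assumes `samples` can be reduced to a flat array of shape `(n_samples, n_free_parameters)`, for example via emcee's `get_chain(flat=True)` - check the emcee handbook linked above for the exact interface), this is how percentile summaries like those reported in `max_likelihood` are typically computed:

```
import numpy as np

# Hypothetical stand-in: one row per posterior sample, one column per free parameter.
flat_samples = np.random.randn(5000, 3)
param_names = ['t0', 'radius', 'a']

summary = {}
for i, name in enumerate(param_names):
    lo, med, hi = np.percentile(flat_samples[:, i], [16, 50, 84])
    # median as the central value, 1-sigma interval from the 16th/84th percentiles
    summary[name] = (med, med - lo, hi - med)

print(summary)
```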
``` import matplotlib.pyplot as plt import numpy as np import json import re ``` # Классификация текста по канонам Соберём данные заново и помимо классификации сделаем визуализацию. Класса будет два: Алкомодель и РИА новости; разница будет существенная. Только на этот раз всё сделаем по гайду от sklearn: http://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html ``` from get_tweets import get_tweets import csv example_string = "@huabf Get Rid of those каРапузов !" def purify(str): # delete all URLs str = re.sub(r'\w+:\/{2}[\d\w-]+(\.[\d\w-]+)*(?:(?:\/[^\s/]*))*', '', str, flags=re.MULTILINE) #delete all usernames str = re.sub(r'@\w+', '', str) return str.lower() print(purify(example_string)) accounts = ["alcomodel", "rianru"] def generate_data(accounts): train_data = [] for account in accounts: with open("unsorted/{0}_tweets.csv".format(account)) as fp: raw_data = csv.reader(fp, delimiter = '|') for line in raw_data: if line[0:1] != "RT": #ignore retweets train_data.append([purify(line[3]), account]) train_target = [train_data[i][1] for i in range(len(train_data))] train_data = [train_data[i][0] for i in range(len(train_data))] return train_data, train_target train_data, train_target = generate_data(accounts) print(len(train_data)) from sklearn.feature_extraction.text import CountVectorizer count_vect = CountVectorizer() X_train_counts = count_vect.fit_transform(train_data) X_train_counts.shape from sklearn.feature_extraction.text import TfidfTransformer tf_transformer = TfidfTransformer() X_train_tf = tf_transformer.fit_transform(X_train_counts) X_train_tf.shape from sklearn.naive_bayes import MultinomialNB clf = MultinomialNB().fit(X_train_tf, train_target) ``` Теперь можно заставить классификатор предсказывать ``` def predict(str, clf): X_new_counts = count_vect.transform(str) X_new_tfidf = tf_transformer.transform(X_new_counts) predicted = clf.predict(X_new_tfidf) for doc, category in zip(str, predicted): print("%r => %s" % (doc, category)) test_data = [] test_data.append("Ладно, допустим, лёд между нами уже растаял, А ЖИР-ТО МОЙ КОГДА НАЧНЁТ ТАЯТЬ???!!! Или это не так работает?") test_data.append("Меня чуть не сбил велосипедист на пешеходном переходе. Ладно. Хорошо.") test_data.append("Премьер Франции заявил, что Ферран останется в кабмине, несмотря на скандал") print(predict(test_data, clf)) ``` # Попытка визуализации ``` print(X_train_counts.toarray()[0:199].shape[0]) X1 = np.sum(X_train_tf.toarray()[0:199], axis=0) X2 = np.sum(X_train_tf.toarray()[200:399], axis=0) print(len(X1), len(X2)) plt.figure() plt.scatter(range(len(X1)), X1, color = "blue") plt.scatter(range(len(X2)), X2, color = "red") plt.show() ``` # Обучение на всех имеющихся классах Для начала превратим все имеющиеся у нас данные в один документ с форматом ["класс", "твит"] Вариант для собранных данных. P.S. 
Вариант для сбора данных уже есть в collect data.ipynb ``` import os train_data = [] for cat in os.listdir("classes"): if cat[0] != '.': for account in os.listdir("classes/%s" % cat): with open("classes/%s/%s" % (cat, account)) as fp: raw_data = csv.reader(fp, delimiter = '|') for line in raw_data: if line[0:1] != "RT": # ignore retweets train_data.append([str(purify(line[3])), cat]) train_target = [train_data[i][1] for i in range(len(train_data))] from sklearn.feature_extraction.text import CountVectorizer count_vect = CountVectorizer() X_train_counts = count_vect.fit_transform([train_data[i][0] for i in range(len(train_data))]) X_train_counts.shape from sklearn.feature_extraction.text import TfidfTransformer tf_transformer = TfidfTransformer() X_train_tf = tf_transformer.fit_transform(X_train_counts) X_train_tf.shape from sklearn.naive_bayes import MultinomialNB clf = MultinomialNB().fit(X_train_tf, train_target) def predict(arr, clf): X_new_counts = count_vect.transform(arr) X_new_tfidf = tf_transformer.transform(X_new_counts) predicted = clf.predict(X_new_tfidf) for doc, category in zip(arr, predicted): print("%r => %s" % (doc, category)) def test_perfomance(test_data, clf=clf): data = [test_data[i][0] for i in range(len(test_data))] target = [test_data[i][1] for i in range(len(test_data))] X_new_counts = count_vect.transform(data) X_new_tfidf = tf_transformer.transform(X_new_counts) predicted = clf.predict(X_new_tfidf) counter_total = 0 counter_correct = 0 for doc, category, tar in zip(data, predicted, target): counter_total += 1 if category == tar: counter_correct += 1 print("Всего: %s, Правильно: %s" % (counter_total, counter_correct)) print("Вероятность правильного определения: %.2f%%" % (counter_correct/counter_total*100), ) test_perfomance(train_data, clf) ``` Построим Pipeline, таким образом объединим токенизацию, векторизацию и функцию весов в одну функцию. from sklearn.pipeline import Pipeline text_clf = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()), ('clf', MultinomialNB()), ]) data = [train_data[i][0] for i in range(len(train_data))] target = [train_data[i][1] for i in range(len(train_data))] text_clf.fit(data, target) ``` from sklearn.linear_model import SGDClassifier svm_clf = SGDClassifier(loss = 'perceptron', n_iter=10) svm_clf.fit(X_train_tf, train_target) ``` predicted = text_clf.predict(train_data) np.mean(predicted == train_arget) ``` test_perfomance(train_data, svm_clf) ``` # Опреление пола пользователя Имеется массив данных с бинарным разделением признаков: пользователь мужчина и пользователь женщина. 
Let's try to infer the user's gender from this data ``` gender_data = [] for cat in os.listdir("gender"): if cat[0] != '.': for account in os.listdir("gender/%s" % cat): with open("gender/%s/%s" % (cat, account)) as fp: raw_data = csv.reader(fp, delimiter = '|') for line in raw_data: if line[0:1] != "RT": # ignore retweets gender_data.append([purify(line[3]), cat]) gender_target = [gender_data[i][1] for i in range(len(gender_data))] print(gender_data[4][0]) from sklearn.pipeline import Pipeline gender_clf = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()), ('clf', SGDClassifier(loss='perceptron', penalty='l2', alpha=1e-3, n_iter=5, random_state=42)), ]) text_gender = [str(gender_data[i][0]) for i in range(len(gender_data))] print(str(text_gender[0])) gender_clf = gender_clf.fit(text_gender, gender_target) predicted = gender_clf.predict(text_gender) np.mean(predicted == gender_target) ``` # Getting the results ``` from get_tweets import get_tweets ``` Now we need to display the collected information for a Twitter account chosen by the user ``` import matplotlib.pyplot as plt from collections import Counter import tweepy import config auth = tweepy.OAuthHandler(config.consumer_key, config.consumer_secret) auth.set_access_token(config.access_key, config.access_secret) api = tweepy.API(auth) def classify(username): get_tweets(username, folder = "unsorted") tweets = [] with open("unsorted/{0}_tweets.csv".format(username)) as fp: raw_data = csv.reader(fp, delimiter = '|') for line in raw_data: if line[0:1] != "RT": #ignore retweets tweets.append(purify(line[3])) #tweets are ready X_new_counts = count_vect.transform(tweets) X_new_tfidf = tf_transformer.transform(X_new_counts) predicted = clf.predict(X_new_tfidf) result = [] for doc, category in zip(tweets, predicted): result.append(category) z = Counter(result) print("\n") user = api.get_user(username) print(user.name, ", ", user.followers_count, "подписчиков.") print("Интересы: ") labels = list(z) values = list(z.values()) explode = [np.log(x)/50 for x in values] plt.pie(values, labels = labels, explode = explode, autopct='%1.1f%%', shadow=True, startangle=140) plt.axis('equal') plt.show() for value, label in zip(values, labels): print(label, " : %1.1f%%" % (value/len(tweets)*100)) gender_predict = gender_clf.predict(tweets) gender_result=[] for doc, category in zip(tweets, gender_predict): gender_result.append(category) print("Gender: ", Counter(gender_result).most_common()[0][0]) classify("brupsen") classify("medvedevrussia") classify("kinamanka") !python3 classify.py from sklearn.externals import joblib import matplotlib.pyplot as plt from collections import Counter import tweepy import config import csv import sys from get_tweets import get_tweets from learn import purify import numpy as np auth = tweepy.OAuthHandler(config.consumer_key, config.consumer_secret) auth.set_access_token(config.access_key, config.access_secret) api = tweepy.API(auth) def classify(username): if len(sys.argv) == 2: username = sys.argv[1] try: text_clf = joblib.load("models/text_model.pkl") gender_clf = joblib.load("models/gender_model.pkl") except FileNotFoundError: sys.exit("Не найден файл модели") get_tweets(username, folder="unsorted") tweets = [] with open("unsorted/{0}_tweets.csv".format(username)) as fp: raw_data = csv.reader(fp, delimiter='|') for line in raw_data: if line[0:1] != "RT": # ignore retweets tweets.append(purify(line[3])) predicted = text_clf.predict(tweets) result = [] for doc, category in zip(tweets, predicted):
result.append(category) z = Counter(result) user = api.get_user(username) labels = list(z) values = list(z.values()) explode = [np.log(x) / 50 for x in values] plt.pie(values, labels=labels, explode=explode, autopct='%1.1f%%', shadow=True, startangle=140) plt.axis('equal') print("\n") print(user.name, ", ", user.followers_count, "подписчиков.") print("Интересы: ") for value, label in zip(values, labels): print(label, " : %1.1f%%" % (value / len(tweets) * 100)) gender_predict = gender_clf.predict(tweets) gender_result = [] for doc, category in zip(tweets, gender_predict): gender_result.append(category) print("Пол: ", Counter(gender_result).most_common()[0][0]) plt.show() classify("medvedevrussia") classify("Stalingulag") classify("mudakoff") classify("navalny") classify("meduzaproject") classify("rianru") classify("whysodisgusting") classify("alcomodel") classify("Narisovana_ka") ```
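Note that `test_perfomance(train_data, clf)` above scores the classifiers on the same tweets they were trained on, so the reported numbers are optimistic. A minimal sketch of a held-out check — assuming the `data`/`target` lists built in the multi-class section and that scikit-learn's `train_test_split` is available — could look like this:

```python
from sklearn.model_selection import train_test_split

# hold out 20% of the tweets purely for evaluation (split parameters are illustrative)
data_train, data_test, target_train, target_test = train_test_split(
    data, target, test_size=0.2, random_state=42)

held_out_clf = Pipeline([('vect', CountVectorizer()),
                         ('tfidf', TfidfTransformer()),
                         ('clf', MultinomialNB())])
held_out_clf.fit(data_train, target_train)

# accuracy on tweets the model has never seen
print(np.mean(held_out_clf.predict(data_test) == target_test))
```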
``` # This Python 3 environment comes with many helpful analytics libraries installed # It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python # For example, here's several helpful packages to load import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session ``` ## Multilayer Perceptron ``` from keras.utils import plot_model from keras.models import Model from keras.layers import Input from keras.layers import Dense visible = Input(shape=(2,)) hidden1 = Dense(10, activation='relu')(visible) hidden2 = Dense(20, activation='relu')(hidden1) hidden3 = Dense(10, activation='relu')(hidden2) output = Dense(1, activation='sigmoid')(hidden3) model = Model(inputs=visible, outputs=output) model.summary() plot_model(model, to_file='multilayer_perceptron_graph.png') ``` ## Convolution Neural Network ``` from keras.utils import plot_model from keras.models import Model from keras.layers import Input from keras.layers import Dense from keras.layers import Flatten from keras.layers.convolutional import Conv2D from keras.layers.pooling import MaxPooling2D visible = Input(shape=(64,64,1)) conv1 = Conv2D(32, (4,4), activation='relu')(visible) pool1 = MaxPooling2D()(conv1) conv2 = Conv2D(16, (4,4), activation='relu')(pool1) pool2 = MaxPooling2D()(conv2) flat1 = Flatten()(pool2) hidden1 = Dense(10, activation='relu')(flat1) output = Dense(1, activation='sigmoid')(hidden1) model = Model(inputs=visible, outputs=output) model.summary() plot_model(model, to_file='convolutional_neural_network.png') ``` ## Recurrent Neural Network ``` from keras.utils import plot_model from keras.models import Model from keras.layers import Input from keras.layers import Dense from keras.layers.recurrent import LSTM visible = Input(shape=(100,1)) hidden1 = LSTM(10)(visible) hidden2 = Dense(20, activation='relu')(hidden1) output = Dense(1, activation='sigmoid')(hidden2) model = Model(inputs=visible, outputs=output) model.summary() plot_model(model, to_file='recurrent_neural_network.png') ```
## Tracking Callbacks ``` from fastai.gen_doc.nbdoc import * from fastai.vision import * from fastai.callbacks import * ``` This module regroups the callbacks that track one of the metrics computed at the end of each epoch to make decisions about training. To show examples of use, we'll use our sample of MNIST and a simple cnn model. ``` path = untar_data(URLs.MNIST_SAMPLE) data = ImageDataBunch.from_folder(path) show_doc(TerminateOnNaNCallback) ``` Sometimes, training diverges and the loss goes to nan. In that case, there's no point continuing, so this callback stops the training. ``` model = simple_cnn((3,16,16,2)) learn = Learner(data, model, metrics=[accuracy]) learn.fit_one_cycle(1,1e4) ``` Using it prevents that situation from happening. ``` model = simple_cnn((3,16,16,2)) learn = Learner(data, model, metrics=[accuracy], callbacks=[TerminateOnNaNCallback()]) learn.fit(2,1e4) ``` ### Callback methods You don't call these yourself - they're called by fastai's [`Callback`](/callback.html#Callback) system automatically to enable the class's functionality. ``` show_doc(TerminateOnNaNCallback.on_batch_end) show_doc(TerminateOnNaNCallback.on_epoch_end) show_doc(EarlyStoppingCallback) ``` This callback tracks the quantity in `monitor` during the training of `learn`. `mode` can be forced to 'min' or 'max' but will automatically try to determine if the quantity should be the lowest possible (validation loss) or the highest possible (accuracy). Will stop training after `patience` epochs if the quantity hasn't improved by `min_delta`. ``` model = simple_cnn((3,16,16,2)) learn = Learner(data, model, metrics=[accuracy], callback_fns=[partial(EarlyStoppingCallback, monitor='accuracy', min_delta=0.01, patience=3)]) learn.fit(50,1e-42) ``` ### Callback methods You don't call these yourself - they're called by fastai's [`Callback`](/callback.html#Callback) system automatically to enable the class's functionality. ``` show_doc(EarlyStoppingCallback.on_train_begin) show_doc(EarlyStoppingCallback.on_epoch_end) show_doc(SaveModelCallback) ``` This callback tracks the quantity in `monitor` during the training of `learn`. `mode` can be forced to 'min' or 'max' but will automatically try to determine if the quantity should be the lowest possible (validation loss) or the highest possible (accuracy). Will save the model in `name` whenever determined by `every` ('improvement' or 'epoch'). Loads the best model at the end of training if `every='improvement'`. ``` model = simple_cnn((3,16,16,2)) learn = Learner(data, model, metrics=[accuracy]) learn.fit_one_cycle(5,1e-4, callbacks=[SaveModelCallback(learn, every='epoch', monitor='accuracy', name='model')]) ``` Choosing `every='epoch'` saves an individual model at the end of each epoch. ``` !ls ~/.fastai/data/mnist_sample/models learn.fit_one_cycle(5,1e-4, callbacks=[SaveModelCallback(learn, every='improvement', monitor='accuracy', name='best')]) ``` Choosing `every='improvement'` saves the single best model out of all epochs during training. ``` !ls ~/.fastai/data/mnist_sample/models ``` ### Callback methods You don't call these yourself - they're called by fastai's [`Callback`](/callback.html#Callback) system automatically to enable the class's functionality. ``` show_doc(SaveModelCallback.on_epoch_end) show_doc(SaveModelCallback.on_train_end) show_doc(ReduceLROnPlateauCallback) ``` This callback tracks the quantity in `monitor` during the training of `learn`.
`mode` can be forced to 'min' or 'max' but will automatically try to determine if the quantity should be the lowest possible (validation loss) or the highest possible (accuracy). Will reduce the learning rate by `factor` after `patience` epochs if the quantity hasn't improved by `min_delta`. ### Callback methods You don't call these yourself - they're called by fastai's [`Callback`](/callback.html#Callback) system automatically to enable the class's functionality. ``` show_doc(ReduceLROnPlateauCallback.on_train_begin) show_doc(ReduceLROnPlateauCallback.on_epoch_end) show_doc(TrackerCallback) show_doc(TrackerCallback.get_monitor_value) ``` ### Callback methods You don't call these yourself - they're called by fastai's [`Callback`](/callback.html#Callback) system automatically to enable the class's functionality. ``` show_doc(TrackerCallback.on_train_begin) ``` ## Undocumented Methods - Methods moved below this line will intentionally be hidden ## New Methods - Please document or move to the undocumented section
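Tying back to the `ReduceLROnPlateauCallback` section above, which describes the parameters but does not show a training call: here is a usage sketch in the same `callback_fns` style as the `EarlyStoppingCallback` example earlier in this notebook. The hyperparameter values are illustrative only.

```python
model = simple_cnn((3,16,16,2))
learn = Learner(data, model, metrics=[accuracy],
                callback_fns=[partial(ReduceLROnPlateauCallback, monitor='accuracy',
                                      min_delta=0.01, patience=2, factor=0.2)])
learn.fit(10, 1e-3)
```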
## Now You Code 1: Simple Plotting In this first now you code, you will execute some simple plotting in matplotlib and plot.ly In most cases you will be writing 1-2 lines of code. you will spend the majority of time looking through the documentation here: - http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.plot.html - https://plot.ly/pandas/ Also try interactive help: `help(eye_df.plot)` for pandas and `help(eye_df.iplot)` for plot.ly ``` import pandas as pd import plotly as py import matplotlib as plt import cufflinks as cf %matplotlib inline # setup the credentials # Sign up for a FREE plot.ly account and get your API credentials. https://plot.ly/settings/api py.tools.set_credentials_file(username='????', api_key='????') eye_data = [ {'Eye Color' : 'Blue', 'Count' : 190 }, {'Eye Color' : 'Brown', 'Count' : 245 }, {'Eye Color' : 'Green', 'Count' : 64 }, {'Eye Color' : 'Hazel', 'Count' : 93 } ] eye_df = pd.DataFrame(eye_data) eye_df hair_df = pd.DataFrame( {'Hair Color' :pd.Series( ['Black', 'Blond', 'Brown', 'Red'], name='Hair Color'), 'Count' : pd.Series( [108, 125, 286, 73], name = 'Count')} ) hair_df # plot a vertical bar chart of hair colors using pandas / matplotlib # Hair color names on the X axis, and chart title of "Counts of Hair Colors" # plot the EXACT same thing using plotly / cufflinks # next, plot a pie chart of EYE COLORS using pandas / matplotlib # make sure to set the labels on the pie to 'Blue', 'Brown' etc... # plot the same pie chart with plot.ly # In this last step you will plot a scatter plot with both pandas and plotly / cufflinks #let's get the data: cars = pd.read_csv('https://raw.githubusercontent.com/mafudge/datasets/master/cars/weight-vs-mpg.csv') cars.sample(5) # plot use pandas / matplotlib to plot a scatter of weight vs mpg for cars # Use cufflinks to plot a scatter here. ``` ## : Reflection Reflect upon your experience completing this assignment. This should be a personal narrative, in your own voice, and cite specifics relevant to the activity as to help the grader understand how you arrived at the code you submitted. Things to consider touching upon: Elaborate on the process itself. Did your original problem analysis work as designed? How many iterations did you go through before you arrived at the solution? Where did you struggle along the way and how did you overcome it? What did you learn from completing the assignment? What do you need to work on to get better? What was most valuable and least valuable about this exercise? Do you have any suggestions for improvements? To make a good reflection, you should journal your thoughts, questions and comments while you complete the exercise. Keep your response to between 100 and 250 words. `--== Write Your Reflection Below Here ==--`
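For orientation only (this is an exercise, so treat the following as a sketch rather than the full solution), the pandas calls being asked for are one-liners along these lines, assuming the `hair_df` and `eye_df` frames defined above:

```python
# vertical bar chart of hair colors with the requested title
hair_df.plot.bar(x='Hair Color', y='Count', title='Counts of Hair Colors')

# pie chart of eye colors, using the color names as labels
eye_df.set_index('Eye Color')['Count'].plot.pie(autopct='%1.1f%%')
```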
# Interpolation with scipy ``` %matplotlib inline ``` Official documentation: https://docs.scipy.org/doc/scipy/reference/interpolate.html ## Import modules and initialize data ``` import numpy as np import pandas as pd import scipy.interpolate import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import axes3d ``` ## Interpolate 1D functions In the following examples, we interpolate $f(x) \mapsto \sin(x)$ ``` xmin, xmax = 0., 4*np.pi x = np.linspace(xmin, xmax, 10) y = np.sin(x) x2 = np.linspace(xmin, xmax, 100) ``` ### Linear interpolation ``` # Linear interpolation with extrapolation f = scipy.interpolate.interp1d(x, y, kind='linear', fill_value="extrapolate") y2 = f(x2) plt.plot(x, y, "o:b", label="original") plt.plot(x2, y2, ".-r", label="interpolated") plt.legend(); ``` ### B-Splines interpolation https://docs.scipy.org/doc/scipy/reference/interpolate.html#d-splines ``` # https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.splrep.html#scipy.interpolate.splrep # https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.splprep.html#scipy.interpolate.splprep spl = scipy.interpolate.splrep(x, y) y2 = scipy.interpolate.splev(x2, spl) plt.plot(x, y, "o:b", label="original") plt.plot(x2, y2, ".-r", label="interpolated") plt.legend(); spl = scipy.interpolate.splrep(x, y, xb=x[0], xe=x[-1], # The interval to fit #s=0., # A smoothing factor k=1) # The degree fo the spline fit y2 = scipy.interpolate.splev(x2, spl) plt.plot(x, y, "o:b", label="original") plt.plot(x2, y2, ".-r", label="interpolated") plt.legend(); ``` ### Spline linear interpolation ``` # Spline linear interpolation with extrapolation (should be the same than spline1...) f = scipy.interpolate.interp1d(x, y, kind='slinear', fill_value="extrapolate") y2 = f(x2) plt.plot(x, y, "o:b", label="original") plt.plot(x2, y2, ".-r", label="interpolated") plt.legend(); ``` ## Interpolate 2D functions In the following examples, we interpolate $f(x, y) \mapsto \sin(x) + \sin(y)$ ``` # Build data x = np.arange(-1*np.pi, 1*np.pi, np.pi/4) y = np.arange(-1*np.pi, 1*np.pi, np.pi/4) xx, yy = np.meshgrid(x, y) z = np.sin(xx) + np.sin(yy) # Plot data fig = plt.figure(figsize=(12, 8)) ax = axes3d.Axes3D(fig) #ax.plot_wireframe(xx, yy, z) surf = ax.plot_surface(xx, yy, z, cmap='gnuplot2', # 'jet' # 'gnuplot2' rstride=1, cstride=1, shade=False) plt.title("Original data") plt.show(); ``` ### Linear interpolation Documentation: https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.interp2d.html#scipy.interpolate.interp2d ``` f = scipy.interpolate.interp2d(x, y, z, kind='linear', bounds_error=True) # Let 'f' raise an exception when the requested point is outside the range defined by x and y # Build data x_hd = np.arange(-1*np.pi, 1*np.pi-np.pi/4, np.pi/32) y_hd = np.arange(-1*np.pi, 1*np.pi-np.pi/4, np.pi/32) xx_hd,yy_hd = np.meshgrid(x_hd, y_hd) z_hd = np.zeros(xx_hd.shape) for xi in range(z_hd.shape[0]): for yi in range(z_hd.shape[1]): z_hd[xi, yi] = f(x_hd[xi], y_hd[yi]) # Plot data fig = plt.figure(figsize=(12, 8)) ax = axes3d.Axes3D(fig) surf = ax.plot_surface(xx_hd, yy_hd, z_hd, cmap='gnuplot2', # 'jet' # 'gnuplot2' rstride=1, cstride=1, shade=False) plt.show(); ``` ### Non uniform grid data Documentation: https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.interp2d.html#scipy.interpolate.interp2d ``` # Build data x_nu = np.arange(-1*np.pi, 1*np.pi, np.pi/4) y_nu = np.arange(-1*np.pi, 1*np.pi, np.pi/4) x_nu = x_nu.tolist() y_nu = y_nu.tolist() del x_nu[2] del y_nu[2] xx, yy = 
np.meshgrid(x_nu, y_nu) z_nu = np.sin(xx) + np.sin(yy) f = scipy.interpolate.interp2d(x_nu, y_nu, z_nu, kind='linear', bounds_error=True) # Let 'f' raise an exception when the requested point is outside the range defined by x and y # Build data x_nu_hd = np.arange(-1*np.pi, 1*np.pi-np.pi/4, np.pi/32) y_nu_hd = np.arange(-1*np.pi, 1*np.pi-np.pi/4, np.pi/32) xx_nu_hd,yy_nu_hd = np.meshgrid(x_nu_hd, y_nu_hd) z_nu_hd = np.zeros(xx_nu_hd.shape) for xi in range(z_nu_hd.shape[0]): for yi in range(z_nu_hd.shape[1]): z_nu_hd[xi, yi] = f(x_nu_hd[xi], y_nu_hd[yi]) # Plot data fig = plt.figure(figsize=(12, 8)) ax = axes3d.Axes3D(fig) surf = ax.plot_surface(xx_nu_hd, yy_nu_hd, z_nu_hd, cmap='gnuplot2', # 'jet' # 'gnuplot2' rstride=1, cstride=1, shade=False) surf = ax.plot_surface(xx_hd, yy_hd, z_hd, cmap='gnuplot2', # 'jet' # 'gnuplot2' rstride=1, cstride=1, alpha=0.5, shade=False) plt.show(); ``` ### Cubic splines ``` f = scipy.interpolate.interp2d(x, y, z, kind='cubic', bounds_error=True) # Let 'f' raise an exception when the requested point is outside the range defined by x and y # Build data x_hd = np.arange(-1*np.pi, 1*np.pi-np.pi/4, np.pi/32) y_hd = np.arange(-1*np.pi, 1*np.pi-np.pi/4, np.pi/32) xx_hd,yy_hd = np.meshgrid(x_hd, y_hd) z_hd = np.zeros(xx_hd.shape) for xi in range(z_hd.shape[0]): for yi in range(z_hd.shape[1]): z_hd[xi, yi] = f(x_hd[xi], y_hd[yi]) # Plot data fig = plt.figure(figsize=(12, 8)) ax = axes3d.Axes3D(fig) surf = ax.plot_surface(xx_hd, yy_hd, z_hd, cmap='gnuplot2', # 'jet' # 'gnuplot2' rstride=1, cstride=1, shade=False) plt.show(); ``` ### Interpolate unstructured D-dimensional data Documentation: https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.griddata.html #### Scipy official documentation example Example taken from https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.griddata.html ``` def func(x, y): return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2 grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j] points = np.random.rand(1000, 2) values = func(points[:,0], points[:,1]) grid_z0 = scipy.interpolate.griddata(points, values, (grid_x, grid_y), method='nearest') grid_z1 = scipy.interpolate.griddata(points, values, (grid_x, grid_y), method='linear') grid_z2 = scipy.interpolate.griddata(points, values, (grid_x, grid_y), method='cubic') plt.subplot(221) plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower') plt.plot(points[:,0], points[:,1], 'k.', ms=1) plt.title('Original') plt.subplot(222) plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower') plt.title('Nearest') plt.subplot(223) plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower') plt.title('Linear') plt.subplot(224) plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower') plt.title('Cubic') plt.gcf().set_size_inches(6, 6) plt.tight_layout() plt.show() ``` #### Callable version ``` class InterpoledGridData: def __init__(self, x, y, z, interpolation_method='linear', fill_value=float('nan')): self.x = x self.y = y self.z = z self.fill_value = fill_value self.interpolation_method = interpolation_method def __call__(self, x1_mesh, x2_mesh): z = scipy.interpolate.griddata(points = (self.x, self.y), values = self.z, xi = (x1_mesh, x2_mesh), fill_value=self.fill_value, method = self.interpolation_method) if z.ndim == 0: z = float(z) return z x = np.random.rand(1000) y = np.random.rand(1000) z = func(x, y) f = InterpoledGridData(x, y, z, interpolation_method='cubic') f(0.5, 0.5) x_hd = np.linspace(x.min(), x.max(), 100) y_hd = np.linspace(y.min(), y.max(), 100) xx_hd, 
yy_hd = np.meshgrid(x_hd, y_hd) z_hd = f(xx_hd, yy_hd) # Plot data fig = plt.figure(figsize=(12, 8)) ax = axes3d.Axes3D(fig, azim=150, elev=30) surf = ax.plot_surface(xx_hd, yy_hd, z_hd, cmap='gnuplot2', rstride=1, cstride=1, vmin=np.nanmin(z_hd), vmax=np.nanmax(z_hd), shade=False) plt.show(); fig, ax = plt.subplots(figsize=(12, 8)) im = ax.pcolormesh(xx_hd, yy_hd, z_hd, #shading='gouraud', cmap='gnuplot2') plt.colorbar(im, ax=ax) plt.show(); ```
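One small usage note on the 2D examples above: an `interp2d` object can be evaluated on whole coordinate vectors at once, which avoids the element-by-element Python loops. A minimal sketch, assuming `f`, `x_hd` and `y_hd` as defined in one of the `interp2d` cells:

```python
# calling f(x, y) with 1-D coordinate arrays returns a 2-D array of shape (len(y), len(x))
z_hd_vec = f(x_hd, y_hd)
print(z_hd_vec.shape)
```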
``` import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis as QDA from sklearn.linear_model import LogisticRegression as LR from sklearn.neighbors import KernelDensity from sklearn.model_selection import GridSearchCV import warnings warnings.filterwarnings("ignore") ``` ## Ex. 6.12 ``` def get_data(path): data=np.loadtxt(path) X=data[:, 1:] y=data[:, 0] return X, y X_train, y_train=get_data('./data/zip.train') X_test, y_test=get_data('./data/zip.test') print(X_train.shape, X_test.shape) print(np.unique(y_train, return_counts=True)[1]) print(np.unique(y_test, return_counts=True)[1]) fit1 = LDA().fit(X_train, y_train) fit2 = QDA().fit(X_train, y_train) fit3 = LR(random_state=0).fit(X_train, y_train) result1 = fit1.score(X_train, y_train), fit1.score(X_test, y_test) result2 = fit2.score(X_train, y_train), fit2.score(X_test, y_test) result3 = fit3.score(X_train, y_train), fit3.score(X_test, y_test) print(result1) print(result2) print(result3) fit0 = LDA(n_components=9).fit(X_train, y_train) X_train_t = fit0.transform(X_train) X_test_t = fit0.transform(X_test) fit1 = LDA().fit(X_train_t, y_train) fit2 = QDA().fit(X_train_t, y_train) fit3 = LR(random_state=0).fit(X_train_t, y_train) result1 = fit1.score(X_train_t, y_train), fit1.score(X_test_t, y_test) result2 = fit2.score(X_train_t, y_train), fit2.score(X_test_t, y_test) result3 = fit3.score(X_train_t, y_train), fit3.score(X_test_t, y_test) print(result1) print(result2) print(result3) def fit(X_train, y_train, space): nums = [] kdes = [] for i in range(len(np.unique(y_train))): nums.append(np.sum(y_train == i)) X = X_train[y_train == i] grid = GridSearchCV(KernelDensity(), {'bandwidth': space}).fit(X) kdes.append(grid.best_estimator_) print('{}th ({}) best bandwidth: {}'.format(i+1, nums[-1], kdes[-1].bandwidth)) return np.log(nums), kdes def score(pri, lik, X, y): Y = [] for i, kde in enumerate(lik): Y.append(pri[i] + kde.score_samples(X)) yh = np.argmax(Y, axis=0) return (y == yh).mean() pri1, lik1 = fit(X_train, y_train, np.linspace(0.05, 0.4, 50)) pri2, lik2 = fit(X_train_t, y_train, np.linspace(0.2, 0.9, 50)) result4 = score(pri1, lik1, X_train, y_train), score(pri1, lik1, X_test, y_test) result5 = score(pri2, lik2, X_train_t, y_train), score(pri2, lik2, X_test_t, y_test) print(result4) print(result5) pri0 = np.zeros(10) result4 = score(pri0, lik1, X_train, y_train), score(pri0, lik1, X_test, y_test) result5 = score(pri0, lik2, X_train_t, y_train), score(pri0, lik2, X_test_t, y_test) print(result4) print(result5) X_train = pd.read_csv('./data/vowel.train', index_col=0) X_test = pd.read_csv('./data/vowel.test', index_col=0) y_train = X_train.pop('y') y_test = X_test.pop('y') X_train = X_train.values y_train = y_train.values - 1 X_test = X_test.values y_test = y_test.values - 1 print(X_train.shape, X_test.shape) print(np.unique(y_train, return_counts=True)[1]) print(np.unique(y_test, return_counts=True)[1]) fit1 = LDA().fit(X_train, y_train) fit2 = QDA().fit(X_train, y_train) fit3 = LR(random_state=0).fit(X_train, y_train) result1 = fit1.score(X_train, y_train), fit1.score(X_test, y_test) result2 = fit2.score(X_train, y_train), fit2.score(X_test, y_test) result3 = fit3.score(X_train, y_train), fit3.score(X_test, y_test) print(result1) print(result2) print(result3) pri, lik = fit(X_train, y_train, np.linspace(0.3, 0.7, 50)) result4 = score(pri, lik, X_train, y_train), score(pri, lik, X_test, y_test)
print(result4) ```
# Tutorial ToHyDAMOgml 2 June 2020, Jeroen Winkelhorst, Royal HaskoningDHV This notebook walks through an example of creating a HyDAMO GML file. The example covers three DAMO objects, namely `Hydroobject`, `DuikerSifonHevel` and `Stuw`. The required source files are included in the folder `examples/gdb`. More information on the HyDAMO data model can be found here: http://www.nhi.nu/nl/index.php/uitvoering/module-oppervlaktewater/ Not familiar with Jupyter Notebooks? Then read this short introduction (in English): https://nbviewer.jupyter.org/github/jupyter/notebook/blob/master/docs/source/examples/Notebook/Notebook%20Basics.ipynb The installation of the ToHyDAMOgml tool is explained in the Readme of this package. ### Loading the functions First, the folder containing the `hydamo_gml` package is added to the `system path`. Then the required dependencies are imported. ``` import sys # The r'' string holds the path to the `hydamo_gml` folder. For this example the relative path is already set correctly. # sys.path.append(r'../..') # This 'hydamo_gml' folder contains at least the following subfolders: # - `examples\` # - `hydamo_gml\` # - `scripts\` # - `src\` # Import the required packages import os from tohydamogml.hydamo_table import HydamoObject %matplotlib notebook ``` ### Hydro object First we convert the Hydro object (watercourses) to a GML file. We go through the following steps: 1. Load the configuration file hydroobject.json 2. Create a Python HydamoObject. The GML code is built up inside this object. 3. Validate the GML code 4. Write the GML code to a GML file #### Configuration files The configuration files define, per HyDAMO object, how the object is built up. The configuration is stored in JSON files that have a structure similar to Python dictionaries. The structure of the JSON files is explained in a note that can be found in the `docs` folder. In this example we first convert the watercourses. For that we need the JSON file of `HydroObject`. ``` json_hydroobject = r"json/hydroobject.json" ``` Take a look at this JSON file. You can open it with a text editor such as `notepad`. It contains all the information needed to create a HyDAMO object. It is important that the reference to the source database (the 'beheerregister', or asset register) is set correctly in the JSON file. #### Creating the HyDAMO object For this example the source data is set up correctly, so we load the hydroobject. An internet connection is required, because the GML protocols are fetched from the internet. Via the `mask` attribute it is possible to pass a polygon that limits the output to a certain area. This is optional. (You may get an error about a 'data rate limit'. In that case restart this notebook from the Python prompt with the following command: `jupyter notebook --NotebookApp.iopub_data_rate_limit=1e10`) ``` gebied = r"shp/oosterwolde_clip.shp" obj = HydamoObject(json_hydroobject, mask=gebied) ``` After creation the GML output is displayed, but it has not yet been written to a file. The information can be inspected as a table via the command `obj.gdf`. ``` obj.gdf ``` The geometry can easily be plotted with the command `obj.gdf.plot()` ``` obj.gdf.plot() ``` #### Validating the HyDAMO object The GML output can be validated using the supplied XSD files.
It is also possible to write the validation errors to a shapefile. ``` obj.validate_gml(write_error_log=True) ``` If the validation contains errors, they are written to a log file in the folder `wvv_notebook/log`. The log is a shapefile or a csv file. Look up this file and inspect its contents. The XSD file used for validation is located in the folder `src/xsd`. If a newer version is available at `https://github.com/erikderooij/nhi/tree/master/schema`, the files in this folder can be overwritten. #### Exporting the gml to a file After validation we export the gml to a file. The `write_gml` function automatically runs a validation. If you have already run the validation (as we just did in the previous step) you can skip it with the option `skip_validation=True`. If the object contains validation errors, the default behaviour is that no output is written. If you want to write output anyway, use the option `ignore_errors=True`. We write the gml file to the folder 'output' and ignore the validation errors: ``` obj.write_gml(r"output", ignore_errors=True, skip_validation=True) ``` ### The stuw (weir) object The stuw object differs from HydroObject because its JSON configuration refers to attribute-specific functions. The location of these functions has to be loaded in addition to the json location. In the following example the stuw object is written to a gml file. ``` json_stuw = r"json/stuw.json" attr_function = r"json/attribute_functions.py" obj_stuw = HydamoObject(json_stuw, file_attribute_functions=attr_function) obj_stuw.write_gml(r"output", ignore_errors=True, skip_validation=False) ``` ### Exercises In this exercise you will first export the DuikerSifonHevel (culvert/siphon) object to GML. Then you will adjust the JSON so that an extra column is exported. 1. Open the JSON of the DuikerSifonHevel object (`examples\wvv_notebook\json\duikersifonhevel.json`) and try to understand its structure. The documentation in the `docs` folder can help with this. 2. Open the gis layer `duikersifonhevel` from the asset register (`examples\wvv_notebook\gdb\Breg_Hydamo.gdb`) and look at how the JSON refers to the columns in the register 3. Load the DuikerSifonHevel object for the Oosterwolde area ( `shp/clip_oosterwolde.shp`) and plot the culverts. ``` gebied = r"shp/oosterwolde_clip.shp" json_duiker = r"json/duikersifonhevel.json" attr_function = r"json/attribute_functions.py" obj_duiker = HydamoObject(json_duiker, file_attribute_functions=attr_function, mask=gebied) #obj_duiker.write_gml(r"output", ignore_errors=True, skip_validation=False) obj_duiker.gdf.plot() ``` 4. In the example the culverts are filtered. On which attribute is the object filtered, and what does this mean for the results? <br> Hint: - 1 planvorming (planning) - 2 realisatie (under construction) - 3 gerealiseerd (realised) - 4 buiten bedrijf (out of service) - 5 niet meer aanwezig (no longer present) - 7 te verwijderen (to be removed) - 99 onbekend (unknown) ` "filter": { "STATUSOBJECT": [ 3 ] ` The object is filtered on the status code (STATUSOBJECT). This means that only the realised culverts are included in the object 5. Make a copy of the JSON and filter the culverts on 'niet meer aanwezig' (no longer present) and 'te verwijderen' (to be removed). Plot the result. ``` json_vraag5 = r"json/duikersifonhevel_antwoord_vraag_5.json" obj_duiker = HydamoObject(json_vraag5, file_attribute_functions=attr_function, mask=gebied) obj_duiker.gdf.plot() ``` 6. Find out how the column ruwheidswaarde (roughness value) is derived in the JSON file.
- Which column from the asset register is referenced? - What type of output do you see in the GML? - How is the data converted? Hint: look in attribute_functions.py - The function `obj_soortmateriaal` refers to the column SOORTMATERIAAL. This column contains a material name. - In the output the roughness values are numbers (integers) - The data is converted via the function `obj_soortmateriaal`. See the explanation below. The JSON file refers to a function from `attribute functions.py`, namely `obj_soortmateriaal`: ` { "name": "ruwheidswaarde", "type": "Double", "required": true, "src_col": "", "func": "obj_soortmateriaal", "default": 999 }, ` If we look in `attribute functions.py` we see the following function definition: ` def obj_soortmateriaal(damo_gdf=None, obj=None, damo_soortmateriaal="SOORTMATERIAAL"): return damo_gdf.apply(lambda x: _obj_get_soortmateriaal(x[damo_soortmateriaal]), axis=1) ` The function obj_soortmateriaal takes the filtered duikersifonhevel layer from DAMO as its input (inside the function this is `damo_gdf`). Via a `damo_gdf.apply` call, the function `_obj_get_soortmateriaal` is applied to every row. The function `_obj_get_soortmateriaal` takes the column `SOORTMATERIAAL` of the duikersifonhevel object as input and returns a roughness value for a given material name. For example, for the material "beton" (concrete) a roughness value of 75 is returned. So for every row a roughness value is determined based on the column "SOORTMATERIAAL". ` def _obj_get_soortmateriaal(materiaalcode): """Return Strickler Ks waarde Bron: Ven te Chow - Open channel hydraulics tbl 5-6 TODO: omschrijven naar dictionary in config""" if materiaalcode in MATERIAALKUNSTWERK.keys(): if MATERIAALKUNSTWERK[materiaalcode] == "beton": return 75 if MATERIAALKUNSTWERK[materiaalcode] == "gewapend beton": return 75 if MATERIAALKUNSTWERK[materiaalcode] == "metselwerk": return 65 if MATERIAALKUNSTWERK[materiaalcode] == "metaal": return 80 if MATERIAALKUNSTWERK[materiaalcode] == "aluminium": return 80 if MATERIAALKUNSTWERK[materiaalcode] == "ijzer": return 80 if MATERIAALKUNSTWERK[materiaalcode] == "gietijzer": return 75 if MATERIAALKUNSTWERK[materiaalcode] == "PVC": return 80 if MATERIAALKUNSTWERK[materiaalcode] == "gegolfd plaatstaal": return 65 if MATERIAALKUNSTWERK[materiaalcode] == "asbestcement": return 110 return 999 ` 7. We will now add an extra column to the duikersifonhevel object, the column `opmerking` (`OPMERKING`). - Make a copy of the original JSON file - add the extra column `opmerking` (`OPMERKING`) - use "Geen opmerking" as the default value and "required": false - create a hydamo object and inspect the table output - do you see the default value? adjust the JSON, set "required": true and look at the table output again. hint: The type is "String" If required is set to `false`, the default value is not filled in for missing input; if required is set to `true`, it is. ``` json_vraag7 = r"json/duikersifonhevel_antwoord_vraag_7.json" obj_duiker = HydamoObject(json_vraag7, file_attribute_functions=attr_function, mask=gebied) obj_duiker.gdf ``` 8. Export the adjusted duikersifonhevel object to gml. Via the attribute `suffix="tekst"` you can change the name of the export file, so that an existing file with the same name is not overwritten. ``` obj_duiker.write_gml(r"output", ignore_errors=True, skip_validation=False, suffix="_vraag7") ``` <b>This is the end of the tutorial.
<b>This is the end of the tutorial. You have now walked through the most important steps needed to understand how the tool works and how to modify the information. Good luck!</b>
## Our Mission ##

Spam detection is one of the major applications of Machine Learning on the web today. Pretty much all of the major email service providers have spam detection systems built in and automatically classify such mail as 'Junk Mail'.

In this mission we will be using the Naive Bayes algorithm to create a model that can classify SMS messages from [this dataset](https://archive.ics.uci.edu/ml/datasets/SMS+Spam+Collection) as spam or not spam, based on the training we give to the model. It is important to have some level of intuition as to what a spammy text message might look like. Usually they have words like 'free', 'win', 'winner', 'cash', 'prize' and the like in them, as these texts are designed to catch your eye and in some sense tempt you to open them. Also, spam messages tend to have words written in all capitals and tend to use a lot of exclamation marks. To the recipient, it is usually pretty straightforward to identify a spam text, and our objective here is to train a model to do that for us!

Being able to identify spam messages is a binary classification problem, as messages are classified as either 'Spam' or 'Not Spam' and nothing else. Also, this is a supervised learning problem, as we will be feeding a labelled dataset into the model that it can learn from to make future predictions.

### Step 0: Introduction to the Naive Bayes Theorem ###

Bayes theorem is one of the earliest probabilistic inference algorithms, developed by Reverend Bayes (which he used to try and infer the existence of God, no less), and it still performs extremely well for certain use cases.

It's best to understand this theorem using an example. Let's say you are a member of the Secret Service and you have been deployed to protect the Democratic presidential nominee during one of his/her campaign speeches. Being a public event that is open to all, your job is not easy and you have to be on the constant lookout for threats. So one place to start is to assign a certain threat factor to each person. Based on the features of an individual, like their age and sex, and other smaller factors like whether the person is carrying a bag or looks nervous, you can make a judgement call as to whether that person is a viable threat.

If an individual ticks all the boxes up to a level where it crosses a threshold of doubt in your mind, you can take action and remove that person from the vicinity. The Bayes theorem works in the same way, as we are computing the probability of an event (a person being a threat) based on the probabilities of certain related events (the person's age, sex, presence of a bag, nervousness, etc.).

One thing to consider is the independence of these features amongst each other. For example, if a child looks nervous at the event then the likelihood of that person being a threat is not as high as, say, if it was a grown man who was nervous. To break this down a bit further, here there are two features we are considering: age AND nervousness. Say we look at these features individually: we could design a model that flags ALL persons that are nervous as potential threats. However, it is likely that we will have a lot of false positives, as there is a strong chance that minors present at the event will be nervous. Hence, by considering the age of a person along with the 'nervousness' feature we would definitely get a more accurate result as to who are potential threats and who aren't.
This is the 'Naive' bit of the theorem: it considers each feature to be independent of the others, which may not always be the case, and hence that can affect the final judgement.

In short, the Bayes theorem calculates the probability of a certain event happening (in our case, a message being spam) based on the joint probabilistic distributions of certain other events (in our case, a message being classified as spam). We will dive into the workings of the Bayes theorem later in the mission, but first, let us understand the data we are going to work with.

### Step 1.1: Understanding our dataset ###

We will be using a [dataset](https://archive.ics.uci.edu/ml/datasets/SMS+Spam+Collection) from the UCI Machine Learning repository, which has a very good collection of datasets for experimental research purposes. The direct data link is [here](https://archive.ics.uci.edu/ml/machine-learning-databases/00228/).

** Here's a preview of the data: **

<img src="images/dqnb.png" height="1242" width="1242">

The columns in the data set are currently not named and, as you can see, there are 2 columns.

The first column takes two values: 'ham', which signifies that the message is not spam, and 'spam', which signifies that the message is spam.

The second column is the text content of the SMS message that is being classified.

>** Instructions: **
* Import the dataset into a pandas dataframe using the read_table method. Because this is a tab separated dataset we will be using '\t' as the value for the 'sep' argument, which specifies this format.
* Also, rename the column names by specifying a list ['label', 'sms_message'] to the 'names' argument of read_table().
* Print the first five values of the dataframe with the new column names.

```
'''
Solution
'''
import pandas as pd
# Dataset from - https://archive.ics.uci.edu/ml/datasets/SMS+Spam+Collection
df = pd.read_table('smsspamcollection/SMSSpamCollection',
                   sep='\t',
                   header=None,
                   names=['label', 'sms_message'])

# Output printing out the first 5 rows
df.head()
```

### Step 1.2: Data Preprocessing ###

Now that we have a basic understanding of what our dataset looks like, let's convert our labels to binary variables: 0 to represent 'ham' (i.e. not spam) and 1 to represent 'spam', for ease of computation.

You might be wondering why we need to do this step. The answer lies in how scikit-learn handles inputs. Scikit-learn only deals with numerical values, and hence if we were to leave our label values as strings, scikit-learn would do the conversion internally (more specifically, the string labels will be cast to unknown float values).

Our model would still be able to make predictions if we left our labels as strings, but we could have issues later when calculating performance metrics, for example when calculating our precision and recall scores. Hence, to avoid unexpected 'gotchas' later, it is good practice to have our categorical values fed into our model as integers.

>**Instructions: **
* Convert the values in the 'label' column to numerical values using the map method as follows: {'ham':0, 'spam':1} This maps the 'ham' value to 0 and the 'spam' value to 1.
* Also, to get an idea of the size of the dataset we are dealing with, print out the number of rows and columns using 'shape'.

```
'''
Solution
'''
df['label'] = df.label.map({'ham':0, 'spam':1})
print(df.shape)
df.head() # returns (rows, columns)
```

### Step 2.1: Bag of words ###

What we have here in our data set is a large collection of text data (5,572 rows of data).
Most ML algorithms rely on numerical data to be fed into them as input, and email/sms messages are usually text heavy.

Here we'd like to introduce the Bag of Words (BoW) concept, which is a term used to specify the problems that have a 'bag of words' or a collection of text data that needs to be worked with. The basic idea of BoW is to take a piece of text and count the frequency of the words in that text. It is important to note that the BoW concept treats each word individually and the order in which the words occur does not matter.

Using a process which we will go through now, we can convert a collection of documents to a matrix, with each document being a row and each word (token) being a column, and the corresponding (row, column) values being the frequency of occurrence of each word or token in that document.

For example:

Let's say we have 4 documents as follows:

`['Hello, how are you!', 'Win money, win from home.', 'Call me now', 'Hello, Call you tomorrow?']`

Our objective here is to convert this set of text to a frequency distribution matrix, as follows:

<img src="images/countvectorizer.png" height="542" width="542">

Here, as we can see, the documents are numbered in the rows, and each word is a column name, with the corresponding value being the frequency of that word in the document.

Let's break this down and see how we can do this conversion using a small set of documents.

To handle this, we will be using sklearn's [count vectorizer](http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html#sklearn.feature_extraction.text.CountVectorizer) method, which does the following:

* It tokenizes the string (separates the string into individual words) and gives an integer ID to each token.
* It counts the occurrence of each of those tokens.

** Please Note: **

* The CountVectorizer method automatically converts all tokenized words to their lower case form so that it does not treat words like 'He' and 'he' differently. It does this using the `lowercase` parameter which is by default set to `True`.

* It also ignores all punctuation so that words followed by a punctuation mark (for example: 'hello!') are not treated differently than the same words not prefixed or suffixed by a punctuation mark (for example: 'hello'). It does this using the `token_pattern` parameter which has a default regular expression which selects tokens of 2 or more alphanumeric characters.

* The third parameter to take note of is the `stop_words` parameter. Stop words refer to the most commonly used words in a language. They include words like 'am', 'an', 'and', 'the', etc. By setting this parameter value to `english`, CountVectorizer will automatically ignore all words (from our input text) that are found in the built-in list of English stop words in scikit-learn. This is extremely helpful as stop words can skew our calculations when we are trying to find certain key words that are indicative of spam.

We will dive into the application of each of these to our model in a later step, but for now it is important to be aware of such preprocessing techniques available to us when dealing with textual data.

### Step 2.2: Implementing Bag of Words from scratch ###

Before we dive into scikit-learn's Bag of Words (BoW) library to do the dirty work for us, let's implement it ourselves first so that we can understand what's happening behind the scenes.

** Step 1: Convert all strings to their lower case form.
** Let's say we have a document set:

```
documents = ['Hello, how are you!',
             'Win money, win from home.',
             'Call me now.',
             'Hello, Call hello you tomorrow?']
```

>>** Instructions: **
* Convert all the strings in the documents set to their lower case. Save them into a list called 'lower_case_documents'. You can convert strings to their lower case in Python by using the lower() method.

```
'''
Solution:
'''
documents = ['Hello, how are you!',
             'Win money, win from home.',
             'Call me now.',
             'Hello, Call hello you tomorrow?']

lower_case_documents = []
for i in documents:
    lower_case_documents.append(i.lower())
print(lower_case_documents)
```

** Step 2: Removing all punctuation **

>>**Instructions: **
Remove all punctuation from the strings in the document set. Save them into a list called 'sans_punctuation_documents'.

```
'''
Solution:
'''
sans_punctuation_documents = []
import string

for i in lower_case_documents:
    sans_punctuation_documents.append(i.translate(str.maketrans('', '', string.punctuation)))

print(sans_punctuation_documents)
```

** Step 3: Tokenization **

Tokenizing a sentence in a document set means splitting up a sentence into individual words using a delimiter. The delimiter specifies what character we will use to identify the beginning and the end of a word (for example, we could use a single space as the delimiter for identifying words in our document set).

>>**Instructions:**
Tokenize the strings stored in 'sans_punctuation_documents' using the split() method, and store the final document set in a list called 'preprocessed_documents'.

```
'''
Solution:
'''
preprocessed_documents = []
for i in sans_punctuation_documents:
    preprocessed_documents.append(i.split(' '))
print(preprocessed_documents)
```

** Step 4: Count frequencies **

Now that we have our document set in the required format, we can proceed to counting the occurrence of each word in each document of the document set. We will use the `Counter` method from the Python `collections` library for this purpose.

`Counter` counts the occurrence of each item in the list and returns a dictionary with the key as the item being counted and the corresponding value being the count of that item in the list.

>>**Instructions:**
Using the Counter() method and preprocessed_documents as the input, create a dictionary with the keys being each word in each document and the corresponding values being the frequency of occurrence of that word. Save each Counter dictionary as an item in a list called 'frequency_list'.

```
'''
Solution
'''
frequency_list = []
import pprint
from collections import Counter

for i in preprocessed_documents:
    frequency_counts = Counter(i)
    frequency_list.append(frequency_counts)

pprint.pprint(frequency_list)
```

Congratulations! You have implemented the Bag of Words process from scratch! As we can see in our previous output, we have a frequency distribution dictionary which gives a clear view of the text that we are dealing with.

We should now have a solid understanding of what is happening behind the scenes in the `sklearn.feature_extraction.text.CountVectorizer` method of scikit-learn. We will now implement `sklearn.feature_extraction.text.CountVectorizer` in the next step.

### Step 2.3: Implementing Bag of Words in scikit-learn ###

Now that we have implemented the BoW concept from scratch, let's go ahead and use scikit-learn to do this process in a clean and succinct way. We will use the same document set as we used in the previous step.
```
'''
Here we will look to create a frequency matrix on a smaller document set to make sure we understand
how the document-term matrix generation happens. We have created a sample document set 'documents'.
'''
documents = ['Hello, how are you!',
             'Win money, win from home.',
             'Call me now.',
             'Hello, Call hello you tomorrow?']
```

>>**Instructions:**
Import the sklearn.feature_extraction.text.CountVectorizer method and create an instance of it called 'count_vector'.

```
'''
Solution
'''
from sklearn.feature_extraction.text import CountVectorizer
count_vector = CountVectorizer()
```

** Data preprocessing with CountVectorizer() **

In Step 2.2, we implemented a version of the CountVectorizer() method from scratch that entailed cleaning our data first. This cleaning involved converting all of our data to lower case and removing all punctuation marks. CountVectorizer() has certain parameters which take care of these steps for us. They are:

* `lowercase = True`
The `lowercase` parameter has a default value of `True` which converts all of our text to its lower case form.

* `token_pattern = (?u)\\b\\w\\w+\\b`
The `token_pattern` parameter has a default regular expression value of `(?u)\\b\\w\\w+\\b` which ignores all punctuation marks and treats them as delimiters, while accepting alphanumeric strings of length greater than or equal to 2 as individual tokens or words.

* `stop_words`
The `stop_words` parameter, if set to `english`, will remove all words from our document set that match a list of English stop words defined in scikit-learn. Considering the size of our dataset and the fact that we are dealing with SMS messages and not larger text sources like e-mail, we will not be setting this parameter value.

You can take a look at all the parameter values of your `count_vector` object by simply printing out the object as follows:

```
'''
Practice note:
Print the 'count_vector' object which is an instance of 'CountVectorizer()'
'''
print(count_vector)
```

>>**Instructions:**
Fit your document dataset to the CountVectorizer object you have created using fit(), and get the list of words which have been categorized as features using the get_feature_names() method.

```
'''
Solution:
'''
count_vector.fit(documents)
count_vector.get_feature_names()
```

The `get_feature_names()` method returns our feature names for this dataset, which is the set of words that make up our vocabulary for 'documents'.

>>** Instructions:**
Create a matrix with the rows being each of the 4 documents, and the columns being each word. The corresponding (row, column) value is the frequency of occurrence of that word (in the column) in a particular document (in the row). You can do this using the transform() method and passing in the document data set as the argument. The transform() method returns a matrix of numpy integers; you can convert this to an array using toarray(). Call the array 'doc_array'.

```
'''
Solution
'''
doc_array = count_vector.transform(documents).toarray()
doc_array
```

Now we have a clean representation of the documents in terms of the frequency distribution of the words in them. To make it easier to understand, our next step is to convert this array into a dataframe and name the columns appropriately.

>>**Instructions:**
Convert the array we obtained, loaded into 'doc_array', into a dataframe and set the column names to the word names (which you computed earlier using get_feature_names()). Call the dataframe 'frequency_matrix'.
```
'''
Solution
'''
frequency_matrix = pd.DataFrame(doc_array,
                                columns = count_vector.get_feature_names())
frequency_matrix
```

Congratulations! You have successfully implemented a Bag of Words problem for a document dataset that we created.

One potential issue that can arise from using this method out of the box is the fact that if our dataset of text is extremely large (say if we have a large collection of news articles or email data), there will be certain values that are more common than others simply due to the structure of the language itself. So for example words like 'is', 'the', 'an', pronouns, grammatical constructs, etc. could skew our matrix and affect our analysis.

There are a couple of ways to mitigate this. One way is to use the `stop_words` parameter and set its value to `english`. This will automatically ignore all words (from our input text) that are found in a built-in list of English stop words in scikit-learn.

Another way of mitigating this is by using the [tfidf](http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html#sklearn.feature_extraction.text.TfidfVectorizer) method. This method is out of scope for the context of this lesson.

### Step 3.1: Training and testing sets ###

Now that we have understood how to deal with the Bag of Words problem, we can get back to our dataset and proceed with our analysis. Our first step in this regard would be to split our dataset into a training and testing set so we can test our model later.

>>**Instructions:**
Split the dataset into a training and testing set by using the train_test_split method in sklearn. Split the data using the following variables:
* `X_train` is our training data for the 'sms_message' column.
* `y_train` is our training data for the 'label' column.
* `X_test` is our testing data for the 'sms_message' column.
* `y_test` is our testing data for the 'label' column.
Print out the number of rows we have in each of our training and testing data.

```
'''
Solution

NOTE: sklearn.cross_validation has been deprecated and removed in favour of sklearn.model_selection,
so we import train_test_split from sklearn.model_selection.
'''
# split into training and testing sets
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(df['sms_message'],
                                                    df['label'],
                                                    random_state=1)

print('Number of rows in the total set: {}'.format(df.shape[0]))
print('Number of rows in the training set: {}'.format(X_train.shape[0]))
print('Number of rows in the test set: {}'.format(X_test.shape[0]))
```

### Step 3.2: Applying Bag of Words processing to our dataset. ###

Now that we have split the data, our next objective is to follow the steps from Step 2: Bag of Words and convert our data into the desired matrix format. To do this we will be using CountVectorizer() as we did before. There are two steps to consider here:

* Firstly, we have to fit our training data (`X_train`) into `CountVectorizer()` and return the matrix.
* Secondly, we have to transform our testing data (`X_test`) to return the matrix.

Note that `X_train` is our training data for the 'sms_message' column in our dataset and we will be using this to train our model.

`X_test` is our testing data for the 'sms_message' column and this is the data we will be using (after transformation to a matrix) to make predictions on. We will then compare those predictions with `y_test` in a later step.

For now, we have provided the code that does the matrix transformations for you!
```
'''
[Practice Note]

The code for this segment is in 2 parts. Firstly, we are learning a vocabulary dictionary for the training data
and then transforming the data into a document-term matrix; secondly, for the testing data we are only
transforming the data into a document-term matrix.

This is similar to the process we followed in Step 2.3.

We will provide the transformed data to students in the variables 'training_data' and 'testing_data'.
'''

'''
Solution
'''
# Instantiate the CountVectorizer method
count_vector = CountVectorizer()

# Fit the training data and then return the matrix
training_data = count_vector.fit_transform(X_train)

# Transform testing data and return the matrix. Note we are not fitting the testing data into the CountVectorizer()
testing_data = count_vector.transform(X_test)
```

### Step 4.1: Bayes Theorem implementation from scratch ###

Now that we have our dataset in the format that we need, we can move on to the next portion of our mission, which is the algorithm we will use to make our predictions to classify a message as spam or not spam. Remember that at the start of the mission we briefly discussed the Bayes theorem, but now we shall go into a little more detail. In layman's terms, the Bayes theorem calculates the probability of an event occurring based on certain other probabilities that are related to the event in question. It is composed of a prior (the probabilities that we are aware of or that are given to us) and the posterior (the probabilities we are looking to compute using the priors).

Let us implement the Bayes Theorem from scratch using a simple example. Let's say we are trying to find the odds of an individual having diabetes, given that he or she was tested for it and got a positive result. In the medical field, such probabilities play a very important role as they usually deal with life and death situations.

We assume the following:

`P(D)` is the probability of a person having Diabetes. Its value is `0.01`, or in other words, 1% of the general population has diabetes (Disclaimer: these values are assumptions and are not reflective of any medical study).

`P(Pos)` is the probability of getting a positive test result.

`P(Neg)` is the probability of getting a negative test result.

`P(Pos|D)` is the probability of getting a positive result on a test done for detecting diabetes, given that you have diabetes. This has a value of `0.9`. In other words the test is correct 90% of the time. This is also called the Sensitivity or True Positive Rate.

`P(Neg|~D)` is the probability of getting a negative result on a test done for detecting diabetes, given that you do not have diabetes. This also has a value of `0.9` and is therefore correct 90% of the time. This is also called the Specificity or True Negative Rate.

The Bayes formula is as follows:

<img src="images/bayes_formula.png" height="242" width="242">

* `P(A)` is the prior probability of A occurring independently. In our example this is `P(D)`. This value is given to us.

* `P(B)` is the prior probability of B occurring independently. In our example this is `P(Pos)`.

* `P(A|B)` is the posterior probability that A occurs given B. In our example this is `P(D|Pos)`. That is, **the probability of an individual having diabetes, given that the individual got a positive test result. This is the value that we are looking to calculate.**

* `P(B|A)` is the likelihood probability of B occurring, given A. In our example this is `P(Pos|D)`. This value is given to us.
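Since the formula above is embedded as an image, it may help to restate Bayes' theorem explicitly. This is the standard form, added here for readability; it is not part of the original lesson:

$$P(A \mid B) = \frac{P(B \mid A)\,P(A)}{P(B)}$$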
Putting our values into the formula for Bayes theorem we get:

`P(D|Pos) = (P(D) * P(Pos|D)) / P(Pos)`

The probability of getting a positive test result, `P(Pos)`, can be calculated using the Sensitivity and Specificity as follows:

`P(Pos) = [P(D) * Sensitivity] + [P(~D) * (1-Specificity)]`

```
'''
Instructions:
Calculate probability of getting a positive test result, P(Pos)
'''

'''
Solution (skeleton code will be provided)
'''
# P(D)
p_diabetes = 0.01

# P(~D)
p_no_diabetes = 0.99

# Sensitivity or P(Pos|D)
p_pos_diabetes = 0.9

# Specificity or P(Neg|~D)
p_neg_no_diabetes = 0.9

# P(Pos)
p_pos = (p_diabetes * p_pos_diabetes) + (p_no_diabetes * (1 - p_neg_no_diabetes))
print('The probability of getting a positive test result P(Pos) is: {}'.format(p_pos))
```

** Using all of this information we can calculate our posteriors as follows: **

The probability of an individual having diabetes, given that the individual got a positive test result:

`P(D|Pos) = (P(D) * Sensitivity) / P(Pos)`

The probability of an individual not having diabetes, given that the individual got a positive test result:

`P(~D|Pos) = (P(~D) * (1-Specificity)) / P(Pos)`

The sum of our posteriors will always equal `1`.

```
'''
Instructions:
Compute the probability of an individual having diabetes, given that the individual got a positive test result.
In other words, compute P(D|Pos).

The formula is: P(D|Pos) = (P(D) * P(Pos|D)) / P(Pos)
'''

'''
Solution
'''
# P(D|Pos)
p_diabetes_pos = (p_diabetes * p_pos_diabetes) / p_pos
print('Probability of an individual having diabetes, given that that individual got a positive test result is:\
',format(p_diabetes_pos))

'''
Instructions:
Compute the probability of an individual not having diabetes, given that the individual got a positive test result.
In other words, compute P(~D|Pos).

The formula is: P(~D|Pos) = (P(~D) * P(Pos|~D)) / P(Pos)

Note that P(Pos|~D) can be computed as 1 - P(Neg|~D).

Therefore:
P(Pos|~D) = p_pos_no_diabetes = 1 - 0.9 = 0.1
'''

'''
Solution
'''
# P(Pos|~D)
p_pos_no_diabetes = 0.1

# P(~D|Pos)
p_no_diabetes_pos = (p_no_diabetes * p_pos_no_diabetes) / p_pos
print ('Probability of an individual not having diabetes, given that that individual got a positive test result is:'\
,p_no_diabetes_pos)
```

Congratulations! You have implemented Bayes theorem from scratch. Your analysis shows that even if you get a positive test result, there is only an 8.3% chance that you actually have diabetes and a 91.67% chance that you do not have diabetes. This is of course assuming that only 1% of the entire population has diabetes, which is only an assumption.

** What does the term 'Naive' in 'Naive Bayes' mean? **

The term 'Naive' in Naive Bayes comes from the fact that the algorithm considers the features that it is using to make the predictions to be independent of each other, which may not always be the case. So in our Diabetes example, we are considering only one feature, that is the test result. Say we added another feature, 'exercise'. Let's say this feature has a binary value of `0` and `1`, where the former signifies that the individual exercises less than or equal to 2 days a week and the latter signifies that the individual exercises greater than or equal to 3 days a week. If we had to use both of these features, namely the test result and the value of the 'exercise' feature, to compute our final probabilities, Bayes' theorem would fail. Naive Bayes' is an extension of Bayes' theorem that assumes that all the features are independent of each other.
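To make the structure of the diabetes calculation above easier to reuse, here is a small helper function. It is an added sketch, not part of the lesson's skeleton code; the function name and arguments are invented for illustration, and it simply repackages the steps computed above.

```
'''
Added sketch (not part of the lesson): the diabetes calculation wrapped in a function.
'''
def posterior_given_positive(prevalence, sensitivity, specificity):
    # P(Pos) = P(D) * Sensitivity + P(~D) * (1 - Specificity)
    p_pos = prevalence * sensitivity + (1 - prevalence) * (1 - specificity)
    # P(D|Pos) = P(D) * Sensitivity / P(Pos)
    return prevalence * sensitivity / p_pos

# Reproduces the result above: roughly 0.083 for P(D|Pos)
print(posterior_given_positive(0.01, 0.9, 0.9))
```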
### Step 4.2: Naive Bayes implementation from scratch ###

Now that you have understood the ins and outs of Bayes Theorem, we will extend it to consider cases where we have more than one feature.

Let's say that we have two political parties' candidates, 'Jill Stein' of the Green Party and 'Gary Johnson' of the Libertarian Party, and we have the probabilities of each of these candidates saying the words 'freedom', 'immigration' and 'environment' when they give a speech:

* Probability that Jill Stein says 'freedom': 0.1 ---------> `P(F|J)`
* Probability that Jill Stein says 'immigration': 0.1 -----> `P(I|J)`
* Probability that Jill Stein says 'environment': 0.8 -----> `P(E|J)`

* Probability that Gary Johnson says 'freedom': 0.7 -------> `P(F|G)`
* Probability that Gary Johnson says 'immigration': 0.2 ---> `P(I|G)`
* Probability that Gary Johnson says 'environment': 0.1 ---> `P(E|G)`

And let us also assume that the probability of Jill Stein giving a speech, `P(J)`, is `0.5` and the same for Gary Johnson, `P(G) = 0.5`.

Given this, what if we had to find the probability of Jill Stein saying the words 'freedom' and 'immigration'? This is where the Naive Bayes' theorem comes into play, as we are considering two features, 'freedom' and 'immigration'.

Now we are at a place where we can define the formula for the Naive Bayes' theorem:

<img src="images/naivebayes.png" height="342" width="342">

Here, `y` is the class variable, or in our case the name of the candidate, and `x1` through `xn` are the feature vectors, or in our case the individual words. The theorem makes the assumption that each of the feature vectors or words (`xi`) is independent of the others.

To break this down, we have to compute the following posterior probabilities:

* `P(J|F,I)`: Probability of Jill Stein saying the words Freedom and Immigration. Using the formula and our knowledge of Bayes' theorem, we can compute this as follows: `P(J|F,I)` = `(P(J) * P(F|J) * P(I|J)) / P(F,I)`. Here `P(F,I)` is the probability of the words 'freedom' and 'immigration' being said in a speech.

* `P(G|F,I)`: Probability of Gary Johnson saying the words Freedom and Immigration. Using the formula, we can compute this as follows: `P(G|F,I)` = `(P(G) * P(F|G) * P(I|G)) / P(F,I)`

```
'''
Instructions: Compute the probability of the words 'freedom' and 'immigration' being said in a speech, or
P(F,I).

The first step is multiplying the probabilities of Jill Stein giving a speech with her individual
probabilities of saying the words 'freedom' and 'immigration'. Store this in a variable called p_j_text.

The second step is multiplying the probabilities of Gary Johnson giving a speech with his individual
probabilities of saying the words 'freedom' and 'immigration'. Store this in a variable called p_g_text.

The third step is to add both of these probabilities and you will get P(F,I).
'''

'''
Solution: Step 1
'''
# P(J)
p_j = 0.5

# P(F/J)
p_j_f = 0.1

# P(I/J)
p_j_i = 0.1

p_j_text = p_j * p_j_f * p_j_i
print(p_j_text)

'''
Solution: Step 2
'''
# P(G)
p_g = 0.5

# P(F/G)
p_g_f = 0.7

# P(I/G)
p_g_i = 0.2

p_g_text = p_g * p_g_f * p_g_i
print(p_g_text)

'''
Solution: Step 3: Compute P(F,I) and store in p_f_i
'''
p_f_i = p_j_text + p_g_text
print('Probability of words freedom and immigration being said are: ', format(p_f_i))
```

Now we can compute the probability of `P(J|F,I)`, that is the probability of Jill Stein saying the words Freedom and Immigration, and `P(G|F,I)`, that is the probability of Gary Johnson saying the words Freedom and Immigration.
```
'''
Instructions:
Compute P(J|F,I) using the formula P(J|F,I) = (P(J) * P(F|J) * P(I|J)) / P(F,I) and store it in a variable p_j_fi
'''

'''
Solution
'''
p_j_fi = p_j_text / p_f_i
print('The probability of Jill Stein saying the words Freedom and Immigration: ', format(p_j_fi))

'''
Instructions:
Compute P(G|F,I) using the formula P(G|F,I) = (P(G) * P(F|G) * P(I|G)) / P(F,I) and store it in a variable p_g_fi
'''

'''
Solution
'''
p_g_fi = p_g_text / p_f_i
print('The probability of Gary Johnson saying the words Freedom and Immigration: ', format(p_g_fi))
```

And as we can see, just like in the Bayes' theorem case, the sum of our posteriors is equal to 1. Congratulations! You have implemented the Naive Bayes' theorem from scratch. Our analysis shows that there is only a 6.6% chance that Jill Stein of the Green Party uses the words 'freedom' and 'immigration' in her speech, compared to the 93.3% chance for Gary Johnson of the Libertarian Party.

Another more generic example of Naive Bayes' in action is when we search for the term 'Sacramento Kings' in a search engine. In order for us to get the results pertaining to the Sacramento Kings NBA basketball team, the search engine needs to be able to associate the two words together and not treat them individually. If it treated the words individually, we would get results of images tagged with 'Sacramento', like pictures of city landscapes, and images of 'Kings', which could be pictures of crowns or kings from history, when what we are looking for are images of the basketball team. This is a classic case of the search engine treating the words as independent entities and hence being 'naive' in its approach.

Applying this to our problem of classifying messages as spam, the Naive Bayes algorithm *looks at each word individually and not as associated entities* with any kind of link between them. In the case of spam detectors, this usually works, as there are certain red flag words which can almost guarantee classification as spam; for example, emails with words like 'viagra' are usually classified as spam.

### Step 5: Naive Bayes implementation using scikit-learn ###

Thankfully, sklearn has several Naive Bayes implementations that we can use, so we do not have to do the math from scratch. We will be using sklearn's `sklearn.naive_bayes` module to make predictions on our dataset.

Specifically, we will be using the multinomial Naive Bayes implementation. This particular classifier is suitable for classification with discrete features (such as, in our case, word counts for text classification). It takes in integer word counts as its input. On the other hand, Gaussian Naive Bayes is better suited for continuous data as it assumes that the input data has a Gaussian (normal) distribution.

```
'''
Instructions:

We have loaded the training data into the variable 'training_data' and the testing data into the
variable 'testing_data'.

Import the MultinomialNB classifier and fit the training data into the classifier using fit(). Name your classifier
'naive_bayes'. You will be training the classifier using 'training_data' and 'y_train' from our split earlier.
'''

'''
Solution
'''
from sklearn.naive_bayes import MultinomialNB
naive_bayes = MultinomialNB()
naive_bayes.fit(training_data, y_train)

'''
Instructions:
Now that our algorithm has been trained using the training data set, we can make some predictions on the test data
stored in 'testing_data' using predict(). Save your predictions into the 'predictions' variable.
'''

'''
Solution
'''
predictions = naive_bayes.predict(testing_data)
```

Now that predictions have been made on our test set, we need to check the accuracy of our predictions.

### Step 6: Evaluating our model ###

Now that we have made predictions on our test set, our next goal is to evaluate how well our model is doing. There are various mechanisms for doing so, but first let's do a quick recap of them.

** Accuracy ** measures how often the classifier makes the correct prediction. It’s the ratio of the number of correct predictions to the total number of predictions (the number of test data points).

** Precision ** tells us what proportion of messages we classified as spam actually were spam. It is a ratio of true positives (messages classified as spam which are actually spam) to all positives (all messages classified as spam, irrespective of whether that was the correct classification); in other words, it is the ratio of

`[True Positives/(True Positives + False Positives)]`

** Recall (sensitivity)** tells us what proportion of messages that actually were spam were classified by us as spam. It is a ratio of true positives (messages classified as spam which are actually spam) to all the messages that were actually spam; in other words, it is the ratio of

`[True Positives/(True Positives + False Negatives)]`

For classification problems that are skewed in their classification distributions, like in our case (for example, if we had 100 text messages and only 2 were spam and the other 98 weren't), accuracy by itself is not a very good metric. We could classify 90 messages as not spam (including the 2 that were spam, but we classify them as not spam, hence they would be false negatives) and 10 as spam (all 10 false positives) and still get a reasonably good accuracy score. For such cases, precision and recall come in very handy. These two metrics can be combined to get the F1 score, which is the weighted average of the precision and recall scores. This score can range from 0 to 1, with 1 being the best possible F1 score.

We will be using all 4 metrics to make sure our model does well. For all 4 metrics, whose values can range from 0 to 1, having a score as close to 1 as possible is a good indicator of how well our model is doing.

```
'''
Instructions:
Compute the accuracy, precision, recall and F1 scores of your model using your test data 'y_test' and the predictions
you made earlier stored in the 'predictions' variable.
'''

'''
Solution
'''
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
print('Accuracy score: ', format(accuracy_score(y_test, predictions)))
print('Precision score: ', format(precision_score(y_test, predictions)))
print('Recall score: ', format(recall_score(y_test, predictions)))
print('F1 score: ', format(f1_score(y_test, predictions)))
```

### Step 7: Conclusion ###

One of the major advantages that Naive Bayes has over other classification algorithms is its ability to handle an extremely large number of features. In our case, each word is treated as a feature and there are thousands of different words. Also, it performs well even with the presence of irrelevant features and is relatively unaffected by them. The other major advantage it has is its relative simplicity. Naive Bayes works well right out of the box, and tuning its parameters is rarely ever necessary, except usually in cases where the distribution of the data is known. It rarely ever overfits the data.
Another important advantage is that its model training and prediction times are very fast for the amount of data it can handle. All in all, Naive Bayes really is a gem of an algorithm!

Congratulations! You have successfully designed a model that can efficiently predict if an SMS message is spam or not!

Thank you for learning with us!
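As a small closing aside (not part of the original lesson), the vectorization and classification steps above can also be chained with scikit-learn's `Pipeline` helper, so that fitting and predicting on raw text happens through one object. This is just a sketch of an alternative workflow; it assumes the `X_train`, `y_train` and `X_test` variables from the train/test split earlier.

```
'''
Optional extra (added sketch): the same workflow expressed as a scikit-learn Pipeline.
'''
from sklearn.pipeline import make_pipeline

# CountVectorizer and MultinomialNB were imported in the earlier steps
spam_model = make_pipeline(CountVectorizer(), MultinomialNB())
spam_model.fit(X_train, y_train)                    # fits the vectorizer and the classifier together
pipeline_predictions = spam_model.predict(X_test)   # raw text in, labels out
```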
# Precipitation in the Meteorology component

**Goal:** In this example, I give the `Meteorology` component a **time series** of precipitation values and check whether it produces output when the model state is updated.

Define a helpful constant:

```
mps_to_mmph = 1000 * 3600
```

Programmatically create a file holding the precipitation rate time series. This will mimic what I'll need to do in WMT, where I'll have access to the model time step and run duration. Start by defining the precipitation rate values:

```
import numpy as np

n_steps = 10  # can get from cfg file
precip_rates = np.linspace(5, 20, num=n_steps, endpoint=False)
precip_rates
```

Next, write the values to a file in the **input** directory, where it's expected by the cfg file:

```
np.savetxt('./input/precip_rates.txt', precip_rates, fmt='%6.2f')
```

Check the file:

```
cat input/precip_rates.txt
```

## BMI component

Import the BMI `Meteorology` component and create an instance:

```
from topoflow.components.met_base import met_component
m = met_component()
```

Initialize the model. A value of snow depth `h_snow` is needed for the model to update.

```
m.initialize('./input/meteorology-1.cfg')
m.h_snow = 0.0  # Needed for update
```

Unlike when `P` is a scalar, the initial model precipitation volume flux is the first value from **precip_rates.txt**:

```
precip = m.get_value('atmosphere_water__precipitation_leq-volume_flux')  # `P` internally
print type(precip)
print precip.size
precip * mps_to_mmph
```

Advance the model by one time step:

```
m.update()
print '\nCurrent time: {} s'.format(m.get_current_time())
```

Unlike the scalar case, there's an output volume flux of precipitation:

```
print precip * mps_to_mmph  # note that this is a reference, so it'll take the current value of `P`
```

Advance the model to the end, saving the model time and output `P` values (converted back to mm/hr for convenience) at each step:

```
time = [m.get_current_time().copy()]
flux = [precip.copy() * mps_to_mmph]
while m.get_current_time() < m.get_end_time():
    m.update()
    time.append(m.get_current_time().copy())
    flux.append(m.get_value('atmosphere_water__precipitation_leq-volume_flux').copy() * mps_to_mmph)
```

Check the time and flux values:

```
time
flux
```

**Result:** Fails. Input precipitation rates do not match the output precipitation volume flux because of changes we made to the TopoFlow source.

## Babel-wrapped component

Import the Babel-wrapped `Meteorology` component and create an instance:

```
from cmt.components import Meteorology
met = Meteorology()
```

Initialize the model.

```
%cd input
met.initialize('meteorology-1.cfg')
```

The initial model precipitation volume flux is the first value from **precip_rates.txt**:

```
bprecip = met.get_value('atmosphere_water__precipitation_leq-volume_flux')
print type(bprecip)
print bprecip.size
print bprecip.shape
bprecip * mps_to_mmph
```

Advance the model to the end, saving the model time and output `P` values (converted back to mm/hr for convenience) at each step:

```
time = [met.get_current_time()]
flux = [bprecip.max() * mps_to_mmph]
count = 1
while met.get_current_time() < met.get_end_time():
    met.update(met.get_time_step()*count)
    time.append(met.get_current_time())
    flux.append(met.get_value('atmosphere_water__precipitation_leq-volume_flux').max() * mps_to_mmph)
    count += 1
```

Check the time and flux values (noting that I've included the time = 0.0 value here):

```
time
flux
```

**Result:** Fails.
Input precipitation rates do not match the output precipitation volume flux because of changes we made to the TopoFlow source.
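A quick way to quantify this mismatch (not in the original notebook, just a hypothetical check using the `flux` list and the `precip_rates` array defined above) would be to compare the recorded fluxes against the input rates directly:

```
# Hypothetical check (added sketch): compare the recorded fluxes against the input rates.
# `flux` includes the value at time 0, so align the lengths before comparing.
flux_arr = np.asarray(flux)
n = min(len(flux_arr), len(precip_rates))
print(np.allclose(flux_arr[:n], precip_rates[:n]))
```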
# Quantifying relationships between variables Elements of Data Science by [Allen Downey](https://allendowney.com) [MIT License](https://opensource.org/licenses/MIT) ``` # If we're running on Colab, install empiricaldist # https://pypi.org/project/empiricaldist/ import sys IN_COLAB = 'google.colab' in sys.modules if IN_COLAB: !pip install empiricaldist ``` ## Correlation In [Notebook 9](https://colab.research.google.com/github/AllenDowney/ElementsOfDataScience/blob/master/09_relationships.ipynb), I suggest that using correlation to summarize the relationship between two variables is problematic: 1. Correlation only quantifies the linear relationship between variables; if the relationship is non-linear, correlation tends to underestimate it. 2. Correlation quantifies predictability, but not the "strength" of the relationship in terms of slope. In practice, slope is often more important. In this notebook I explore two more reasons correlation is not a great statistic: 3. It is hard to interpret as a measure of predictability. 4. It makes the relationship between variables sound more impressive than it is. As an example, I quantify the relationship between SAT scores and IQ tests. I know this is a contentious topic; people have strong feelings about standardized tests, IQ, and the consequences of using standardized tests for college admissions. I chose this example because it is a topic people care about, and I think the analysis I present can contribute to the discussion. But a similar analysis applies in any domain where we use a correlation to quantify the strength of a relationship between two variables. ## SAT scores and IQ According to Frey and Detterman, "[Scholastic Assessment or g? The relationship between the Scholastic Assessment Test and general cognitive ability](https://www.ncbi.nlm.nih.gov/pubmed/15147489)", the correlation between SAT scores and general intelligence ($g$) is 0.82. This is just one study, and if you read the paper, you might have questions about the methodology. But for now I will take this estimate at face value. If you have another source that reports a different correlation, feel free to plug in another value and run my analysis again. For concreteness, I will generate a fake dataset that has the same mean and standard deviation as the SAT and the IQ, with a correlation of 0.82. According to [this source](https://blog.prepscholar.com/sat-standard-deviation), the mean combined SAT score is 1060 with standard deviation 210. Here's a sample from a normal distribution with these parameters: ``` import numpy as np np.random.seed(17) mean_sat = 1060 sigma1 = 210 sat = np.random.normal(mean_sat, sigma1, size=1000) ``` And here's what the distribution looks like. ``` from empiricaldist import Cdf import matplotlib.pyplot as plt Cdf.from_seq(sat).plot() plt.xlabel('SAT Score') plt.ylabel('CDF') plt.title('Distribution of SAT Scores'); ``` By design, IQ has mean 100 and standard deviation 15: ``` mean_iq = 100 sigma2 = 15 ``` Here's a process for generating a sample of IQs with the given mean, standard deviation, and correlation with SAT scores: ``` rho = 0.82 mus = mean_iq + rho * sigma2 / sigma1 * (sat - mean_sat) var = (1 - rho**2) * sigma2**2 sigma = np.sqrt(var) iq = np.random.normal(mus, sigma) ``` Here's what the distribution looks like. ``` Cdf.from_seq(iq).plot() plt.xlabel('IQ Score') plt.ylabel('CDF') plt.title('Distribution of IQ Scores'); ``` The mean and standard deviation are near 100 and 15, as desired. 
``` iq.mean(), iq.std() ``` And the correlation is near 0.82. ``` a = np.corrcoef(sat, iq) rho_actual = a[0, 1] rho_actual ``` This scatterplot shows the relationship between IQ and SAT in my simulated dataset. ``` plt.plot(sat, iq, 'o', alpha=0.1, markersize=4) plt.xlabel('SAT score') plt.ylabel('IQ') plt.title('Scatter plot of IQ versus SAT score'); ``` ## Linear regression We can use SciPy to estimate the slope and intercept of the linear regression line. ``` from scipy.stats import linregress res = linregress(sat, iq) res ``` The result includes `intercept` and `slope`, which we can use to compute the predicted IQ for each SAT score in the sample. ``` pred_iq = res.intercept + res.slope * sat ``` Here's the scatter plot again with the regression line. ``` plt.plot(sat, iq, 'o', alpha=0.1, markersize=4) plt.plot(sat, pred_iq, alpha=0.6) plt.xlabel('SAT score') plt.ylabel('IQ') plt.title('Scatter plot of IQ versus SAT score'); ``` ## Residuals Now we can compute the residuals, which are the errors for each prediction. ``` resid = iq - pred_iq ``` We can use the residuals to compute the coefficient of determination, $R^2$. ``` R2 = 1 - resid.var() / iq.var() R2 ``` $R^2$ measures the part of the variance in the dependent variable (IQ) that is "explained by" the predictor (SAT score). Or, to say that differently, if we use SAT scores to guess IQs, the variance of the errors will be 66% lower than the variance in IQ scores. That sounds less impressive than a correlation of 0.82. And that's because there is a relationship between correlation and the coefficient of determination: $R^2 = \rho^2$ That is, the coefficient of determination is correlation squared. We can confirm that this equation holds in this example: ``` R2, rho_actual**2 ``` Since correlation is less than 1, $R^2$ is generally smaller than $\rho$. ``` R2, rho_actual ``` If you have a choice of reporting $R^2$ or correlation, I suggest you report $R^2$ because it is more meaningful (percentage reduction in variance) and less falsely impressive. However, I think $R^2$ is also problematic, because reducing variance is not usually what we care about. If the goal is to quantify the quality of a prediction, it is better to use a metric of error that means something in the context of the problem. ## MAE One option is mean absolute error (MAE) which is just what it says: the mean of the absolute values of the residuals. ``` MAE_after = np.abs(resid).std() MAE_after ``` If you use someone's SAT score to guess their IQ, you should expect to be off by about 5 points on average. One way to put that in context is to compare it to the MAE if we don't know SAT scores. In that case, the best strategy is to guess the mean every time. ``` deviation = iq - iq.mean() MAE_before = np.abs(deviation).std() MAE_before ``` If you always guess 100, you should expect to be off by about 8.5 points on average. We can use these results to compute the percentage improvement in MAE, with and without SAT scores: ``` improvement = 1 - MAE_after / MAE_before improvement ``` So we can say that knowing SAT scores decreases the MAE by 44%. That is certainly an improvement, but notice that it sounds less impressive than $R^2 = 0.66$ and much less impressive than $\rho = 0.82$.
## RMSE Another option is RMSE (root mean squared error) which is the standard deviation of the residuals: ``` RMSE_after = resid.std() RMSE_after ``` We can compare that to RMSE without SAT scores, which is the standard deviation of IQ: ``` RMSE_before = iq.std() RMSE_before ``` And here's the improvement: ``` improvement = 1 - RMSE_after / RMSE_before improvement ``` If you know someone's SAT score, you can decrease your RMSE by 42%. There is no compelling reason to prefer RMSE over MAE, but it has one practical advantage: we don't need the data to compute the RMSE. We can derive it from the variance of IQ and $R^2$: $R^2 = 1 - Var(resid) ~/~ Var(iq)$ $Var(resid) = (1 - R^2)~Var(iq)$ $Std(resid) = \sqrt{(1 - R^2) Var(iq)}$ ``` np.sqrt((1-R2) * iq.var()), RMSE_after ``` ## Percentage error One other way to express the value of SAT scores for predicting IQ is the mean absolute percentage error (MAPE). Again, if we don't know SAT scores, the best strategy is to guess the mean. In that case the MAPE is: ``` deviation = iq - iq.mean() MAPE_before = np.abs(deviation / iq).mean() * 100 MAPE_before ``` If we always guess the mean, we expect to be off by 12%, on average. If we use SAT scores to make better guesses, the MAPE is lower: ``` MAPE_after = np.abs(resid / iq).mean() * 100 MAPE_after ``` So we expect to be off by 6.6% on average. And we can quantify the improvement like this: ``` improvement = 1 - MAPE_after / MAPE_before improvement ``` Using SAT scores to predict IQ decreases the mean absolute percentage error by 42%. I included MAPE in this discussion because it is a good choice in some contexts, but this is probably not one of them. Using MAPE implies that an error of 1 IQ point is more important for someone with low IQ and less important for someone with high IQ. In this context, it's not clear whether that's true. ## Correlation overstates usefulness In this example, we've looked at one value of correlation, 0.82. We can do the same analysis for a range of values: ``` rhos = np.linspace(0, 1, 201) R2s = rhos**2 improvements = 1 - np.sqrt(1-R2s) ``` And here's what it looks like: ``` plt.plot(rhos, rhos, linestyle='dotted', color='gray', alpha=0.4) plt.plot(rhos, R2s, label='Reduction in variance ($R^2$)') plt.plot(rhos, improvements, label='Reduction in RMSE') plt.xlabel('Correlation coefficient (ρ)') plt.title('Correlation overstates the strength\nof relationship between variables') plt.legend(); ``` For all values except 0 and 1, $R^2$ is less than correlation, $\rho$. And the improvement in RMSE is less than that. ## Summary Correlation is a problematic statistic because it sounds more impressive than it is. Coefficient of determination, $R^2$, is better because it has a more natural interpretation: percentage reduction in variance. But reducing variance is usually not what we care about. I think it is better to choose a measurement of error that is meaningful in context, possibly one of: * MAE: Mean absolute error * RMSE: Root mean squared error * MAPE: Mean absolute percentage error Which one of these is most meaningful depends on the cost function. Does the cost of being wrong depend on the absolute error, squared error, or percentage error? If so, that should guide your choice. One advantage of RMSE is that we don't need the data to compute it; we only need the variance of the dependent variable and either $\rho$ or $R^2$. In this example, the correlation is 0.82, which sounds much more impressive than it is. $R^2$ is 0.66, which means we can reduce variance by 66%.
But that also sounds more impressive than it is. Using SAT scores to predict IQ, we can reduce MAE by 44%; we can reduce RMSE by 42%, and we can reduce MAPE by 42%. Reporting any of these is more meaningful than reporting correlation or $R^2$.
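As a small follow-up to the summary's last point, here is a minimal sketch (the helper name `rmse_reduction` is mine, not part of the notebook) that turns a correlation directly into the expected fractional reduction in RMSE, using the same $1 - \sqrt{1 - \rho^2}$ formula as the plot above; with the nominal $\rho = 0.82$ it gives about 0.43, close to the 42% computed from the simulated sample.
```
import numpy as np

def rmse_reduction(rho):
    # Fractional reduction in RMSE from using a predictor with correlation rho
    return 1 - np.sqrt(1 - rho**2)

rmse_reduction(0.82)  # approximately 0.43
```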
# Credit Risk Resampling Techniques ``` import warnings warnings.filterwarnings('ignore') import numpy as np import pandas as pd from pathlib import Path from collections import Counter ``` # Read the CSV into DataFrame ``` # Load the data file_path = Path('Resources/lending_data.csv') df = pd.read_csv(file_path) df.head() ``` # Split the Data into Training and Testing ``` # Create our features X = df.drop(columns={"loan_status", "homeowner"}) # Create our target y = df["loan_status"] X.describe() # Check the balance of our target values y.value_counts() # Create X_train, X_test, y_train, y_test from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1) ``` ## Data Pre-Processing Scale the training and testing data using the `StandardScaler` from `sklearn`. Remember that when scaling the data, you only scale the features data (`X_train` and `X_test`). ``` # Create the StandardScaler instance from sklearn.preprocessing import StandardScaler scaler = StandardScaler() # Fit the Standard Scaler with the training data # When fitting scaling functions, only train on the training dataset X_scaler = scaler.fit(X_train) # Scale the training and testing data X_train_scaled = X_scaler.transform(X_train) X_test_scaled = X_scaler.transform(X_test) ``` # Simple Logistic Regression ``` from sklearn.linear_model import LogisticRegression model = LogisticRegression(solver='lbfgs', random_state=1) model.fit(X_train, y_train) # Calculate the balanced accuracy score from sklearn.metrics import balanced_accuracy_score y_pred = model.predict(X_test) balanced_accuracy_score(y_test, y_pred) # Display the confusion matrix from sklearn.metrics import confusion_matrix confusion_matrix(y_test, y_pred) # Print the imbalanced classification report from imblearn.metrics import classification_report_imbalanced print(classification_report_imbalanced(y_test, y_pred)) ``` # Oversampling In this section, you will compare two oversampling algorithms to determine which algorithm results in the best performance. You will oversample the data using the naive random oversampling algorithm and the SMOTE algorithm. For each algorithm, be sure to complete the following steps: 1. View the count of the target classes using `Counter` from the collections library. 2. Use the resampled data to train a logistic regression model. 3. Calculate the balanced accuracy score from sklearn.metrics. 4. Print the confusion matrix from sklearn.metrics. 5. Generate a classification report using the `classification_report_imbalanced` from imbalanced-learn.
Note: Use a random state of 1 for each sampling algorithm to ensure consistency between tests ### Naive Random Oversampling ``` # Resample the training data with the RandomOversampler from imblearn.over_sampling import RandomOverSampler ros = RandomOverSampler(random_state=1) X_resampled, y_resampled = ros.fit_resample(X_train, y_train) # View the count of target classes with Counter Counter(y_resampled) # Train the Logistic Regression model using the resampled data model = LogisticRegression(solver='lbfgs', random_state=1) model.fit(X_resampled, y_resampled) # Calculate the balanced accuracy score y_pred = model.predict(X_test) balanced_accuracy_score(y_test, y_pred) # Display the confusion matrix confusion_matrix(y_test, y_pred) # Print the imbalanced classification report from imblearn.metrics import classification_report_imbalanced print(classification_report_imbalanced(y_test, y_pred)) ``` ### SMOTE Oversampling ``` # Resample the training data with SMOTE from imblearn.over_sampling import SMOTE sm = SMOTE(random_state=1, sampling_strategy=1) X_resampled, y_resampled = sm.fit_resample(X_train, y_train) # View the count of target classes with Counter Counter(y_resampled) # Train the Logistic Regression model using the resampled data model = LogisticRegression(solver='lbfgs', random_state=1) model.fit(X_resampled, y_resampled) # Calculate the balanced accuracy score y_pred = model.predict(X_test) balanced_accuracy_score(y_test, y_pred) # Display the confusion matrix confusion_matrix(y_test, y_pred) # Print the imbalanced classification report print(classification_report_imbalanced(y_test, y_pred)) ``` # Undersampling In this section, you will test an undersampling algorithm to determine which algorithm results in the best performance compared to the oversampling algorithms above. You will undersample the data using the Cluster Centroids algorithm and complete the following steps: 1. View the count of the target classes using `Counter` from the collections library. 2. Use the resampled data to train a logistic regression model. 3. Calculate the balanced accuracy score from sklearn.metrics. 4. Display the confusion matrix from sklearn.metrics. 5. Generate a classification report using the `classification_report_imbalanced` from imbalanced-learn. Note: Use a random state of 1 for each sampling algorithm to ensure consistency between tests ``` # Resample the data using the ClusterCentroids resampler from imblearn.under_sampling import ClusterCentroids cc = ClusterCentroids(random_state=1) X_resampled, y_resampled = cc.fit_resample(X_train, y_train) # View the count of target classes with Counter Counter(y_resampled) # Train the Logistic Regression model using the resampled data model = LogisticRegression(solver='lbfgs', random_state=1) model.fit(X_resampled, y_resampled) y_pred = model.predict(X_test) # Calculate the balanced accuracy score balanced_accuracy_score(y_test, y_pred) # Display the confusion matrix confusion_matrix(y_test, y_pred) # Print the imbalanced classification report print(classification_report_imbalanced(y_test, y_pred)) ``` # Combination (Over and Under) Sampling In this section, you will test a combination over- and under-sampling algorithm to determine if the algorithm results in the best performance compared to the other sampling algorithms above. You will resample the data using the SMOTEENN algorithm and complete the following steps: 1. View the count of the target classes using `Counter` from the collections library. 2.
Use the resampled data to train a logistic regression model. 3. Calculate the balanced accuracy score from sklearn.metrics. 4. Display the confusion matrix from sklearn.metrics. 5. Generate a classification report using the `classification_report_imbalanced` from imbalanced-learn. Note: Use a random state of 1 for each sampling algorithm to ensure consistency between tests ``` # Resample the training data with SMOTEENN from imblearn.combine import SMOTEENN sm = SMOTEENN(random_state=1) X_resampled, y_resampled = sm.fit_resample(X_train, y_train) # View the count of target classes with Counter Counter(y_resampled) # Train the Logistic Regression model using the resampled data model = LogisticRegression(solver='lbfgs', random_state=1) model.fit(X_resampled, y_resampled) y_pred = model.predict(X_test) # Calculate the balanced accuracy score balanced_accuracy_score(y_test, y_pred) # Display the confusion matrix confusion_matrix(y_test, y_pred) # Print the imbalanced classification report print(classification_report_imbalanced(y_test, y_pred)) ``` # Final Questions 1. Which model had the best balanced accuracy score? The Balanced Accuracy Scores are very similar between the models. However, the oversampling models (both Naive and SMOTE) have the best Balanced Accuracy Score of 99.37%: - Naive Random Oversampling: 99.37% - SMOTE Oversampling: 99.37% - Undersampling: 98.81% - Combination: 99.35% 2. Which model had the best recall score? All models individually output an average recall score of 99%. 3. Which model had the best geometric mean score? All models individually output an average geometric mean score of 99%.
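As a cross-check on the recall and geometric mean figures quoted above, here is a minimal sketch that computes both metrics directly from the last set of predictions; it assumes `recall_score` from scikit-learn and `geometric_mean_score` from imbalanced-learn (standard functions in those libraries) and reuses the `y_test`/`y_pred` variables from the cells above, so the averaging conventions may differ slightly from the weighted averages printed by `classification_report_imbalanced`.
```
from sklearn.metrics import recall_score
from imblearn.metrics import geometric_mean_score

# Unweighted average recall across the two classes
print(recall_score(y_test, y_pred, average='macro'))

# Geometric mean of the per-class recalls
print(geometric_mean_score(y_test, y_pred))
```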
``` import pandas as pd import matplotlib.pyplot as plt import seaborn as sns; sns.set() import numpy as np import matplotlib.ticker as mtick import warnings warnings.filterwarnings('ignore') import os ``` # Data Exploration Tasks ``` print(os.getcwd()) os.chdir('../data/processed') combined_ofsted_df = pd.read_csv(os.getcwd() + '/ComponentOneData.csv') combined_ofsted_df.shape ``` ## Examine Distributions ``` ## printing low quantile and high quantile for outliers df = combined_ofsted_df.loc[:,combined_ofsted_df.dtypes == float] for i, col in enumerate(df.columns): plt.figure(i) sns.distplot(df[col],kde=True) print(col + ' Quantile Low: ' + str(df[col].quantile(0.01)) + ' Quantile High: ' + str(df[col].quantile(0.99))) fig, ax = plt.subplots(figsize=(18,6)) sns.distplot(combined_ofsted_df['PNORG'],kde=False,ax=ax) ax.set_xlabel('Percentage of Girls on Roll') ax.set_ylabel('Number of Schools') mean = combined_ofsted_df['PNORG'].mean() median = combined_ofsted_df['PNORG'].median() plt.axvline(mean, color='r', linestyle='-',linewidth=3) plt.axvline(median, color='b', linestyle='--',linewidth=2) L = ax.legend({mean,median}, fontsize=18) L.get_texts()[0].set_text('Mean = {}%'.format(round(mean,1))) L.get_texts()[1].set_text('Median = {}%'.format(round(median,1))) tick = mtick.StrMethodFormatter('{x:,.0f}%') ax.xaxis.set_major_formatter(tick) ## replacing non-numeric values with mean ## printing low quantile and high quantile for outliers df = combined_ofsted_df.loc[:,combined_ofsted_df.dtypes == object].drop(columns=['GENDER','RELCHAR','Rating']) for i, col in enumerate(df.columns): mean = round(df[col].transform(pd.to_numeric, errors='coerce').mean(),1) dictMean = {'NE': mean, 'SUPP': mean, 'NP': mean, 'LOWCOV': mean,'SP': mean} plt.figure(i) df[col] = df[col].replace(dictMean) # Replace placeholder with mean of the column df[col] = df[col].astype(float) # converting column to float so it can calculate quantiles print(col + ' Quantile Low: ' + str(df[col].quantile(0.01)) + ' Quantile High: ' + str(df[col].quantile(0.99))) sns.distplot(df[col],kde=True) combined_ofsted_df[['GENDER','RELCHAR','Rating']].describe().transpose() ## 3 unique values with mostly mixed schools in gender with freq 5590 ## 41 unique values with mostly None in RELCHAR with freq 2658 ## 6 unique values with mostly Good Rating with freq 2719 sns.countplot(x="GENDER", data=combined_ofsted_df) fig, ax = plt.subplots(figsize=(12,6)) sns.countplot(x="RELCHAR", data=combined_ofsted_df,ax=ax) plt.xticks(rotation=90); fig, ax = plt.subplots(figsize=(12,6)) sns.countplot(x="Rating", data=combined_ofsted_df,ax=ax) ``` ## Correlation ``` df = combined_ofsted_df[['PNUMFSM', 'P8MEA']] for col in df.columns: mean = round(df[col].transform(pd.to_numeric, errors='coerce').mean(),1) dictMean = {'NE': mean, 'SUPP': mean, 'NP': mean, 'LOWCOV': mean,'SP': mean} df[col] = df[col].astype(object).replace(dictMean) df['PNUMFSM'] = df['PNUMFSM'].astype(float) * 100 df['P8MEA'] = df['P8MEA'].astype(float) ## Data is concentrated near x = 0 and y = 1 and -1 ## because missing values and placeholders have been imputed with the mean.
fig, ax = plt.subplots(figsize=(12,8)) sns.regplot(data = df ,x='PNUMFSM', y='P8MEA',scatter_kws={'alpha':0.2}); # plt.xlim(-0.9) # plt.ylim(-8) tick = mtick.StrMethodFormatter('{x:,.0f}%') ax.xaxis.set_major_formatter(tick) ax.set_xlabel('Percentage of Students Receiving Free School Meals') ax.set_ylabel('Progress 8 Adjusted Score') ``` **P8MEA** Progress 8 measure after adjustment for extreme scores **ATT8SCR** Average Attainment 8 score per pupil ``` ## replacing non-numeric values with mean so it will generate correlation df = combined_ofsted_df[['ATT8SCR','P8MEA']] for col in df.columns: mean = round(df[col].transform(pd.to_numeric, errors='coerce').mean(),1) dictMean = {'NE': mean, 'SUPP': mean, 'NP': mean, 'LOWCOV': mean,'SP': mean} df[col] = df[col].replace(dictMean) df.astype(float).corr() ``` A positive correlation between a school’s Progress 8 score and its average Attainment 8 score ``` # imputing placeholders with -1 value so it will generate the same visualization as required df = combined_ofsted_df[['P8_BANDING','P8MEA']] for col in df.columns: placeholder = -1 dictMean = {'NE': placeholder, 'SUPP': placeholder, 'NP': placeholder, 'LOWCOV': placeholder,'SP': placeholder} df[col] = df[col].replace(dictMean) df[col] = df[col].astype(float) fig, ax = plt.subplots(figsize=(18,8)) sns.violinplot(data = df ,x='P8_BANDING', y='P8MEA'); ax.set_xlabel('Progress 8 Banding (1 = Well Above Average, 5 = Well Below Average, -1 = No Banding)') ax.set_ylabel('Progress 8 Measure After Adjustment') ``` Discover which Local Authority (LA) has the highest average Attainment 8 score and the highest Progress 8 score. Likewise, find the LA with the lowest scores and the one with the biggest range of values. ``` ## replacing non-numeric values with mean dfLaAndP8 = combined_ofsted_df[['LA','P8MEA','ATT8SCR']] mean = round(dfLaAndP8['P8MEA'].transform(pd.to_numeric, errors='coerce').mean(),1) dictMean = {'NE': mean, 'SUPP': mean, 'NP': mean, 'LOWCOV': mean,'SP': mean} dfLaAndP8['P8MEA'] = dfLaAndP8['P8MEA'].replace(dictMean).astype(float) mean = round(dfLaAndP8['ATT8SCR'].transform(pd.to_numeric, errors='coerce').mean(),1) dictMean = {'NE': mean, 'SUPP': mean, 'NP': mean, 'LOWCOV': mean,'SP': mean} dfLaAndP8['ATT8SCR'] = dfLaAndP8['ATT8SCR'].replace(dictMean).astype(float) ## Directory changed os.chdir('../raw') dfLACodes = pd.read_csv(os.getcwd() + '/la_and_region_codes_meta.csv') merge1_df = pd.merge(dfLaAndP8,dfLACodes, how='left', left_on='LA', right_on='LEA') merge1_df['P8MEA'] = merge1_df['P8MEA'].fillna(merge1_df['P8MEA'].mean()) merge1_df['ATT8SCR'] = merge1_df['ATT8SCR'].fillna(merge1_df['ATT8SCR'].mean()) ``` Local Authority (LA) has the highest average Attainment 8 score and the highest Progress 8 score ``` highestAttainment = merge1_df[merge1_df['ATT8SCR'] == merge1_df['ATT8SCR'].max()] print('Highest Avg Attainment 8 Score: '+ highestAttainment['ATT8SCR'].to_string(index=False) + ' where LA Name: '+ highestAttainment['LA Name'].to_string(index=False)) highestProgress = merge1_df[merge1_df['P8MEA'] == merge1_df['P8MEA'].max()] print('Highest Progress 8 Score: '+ highestProgress['P8MEA'].to_string(index=False) + ' where LA Name: '+ highestProgress['LA Name'].to_string(index=False)) ``` LA with the lowest scores ``` lowestAttainment = merge1_df[merge1_df['ATT8SCR'] == merge1_df['ATT8SCR'].min()] print('Lowest Avg Attainment 8 Score: '+ lowestAttainment['ATT8SCR'].to_string(index=False) + ' where LA Name: '+ lowestAttainment['LA Name'].to_string(index=False)) lowestProgress = 
merge1_df[merge1_df['P8MEA'] == merge1_df['P8MEA'].min()] print('Lowest Progress 8 Score: '+ lowestProgress['P8MEA'].to_string(index=False) + ' where LA Name: '+ lowestProgress['LA Name'].to_string(index=False)) ``` The one with the biggest range of values ``` merge1_df[merge1_df['ATT8SCR'] <= (merge1_df['ATT8SCR'].max() - merge1_df['ATT8SCR'].min())][['LA Name','ATT8SCR']].sample(10) merge1_df[merge1_df['P8MEA'] <= (merge1_df['P8MEA'].max() - merge1_df['P8MEA'].min())][['LA Name','P8MEA']].sample(10) ``` Regions have the highest and lowest Attainment 8 scores and Progress 8 scores. ``` print('Highest Avg Attainment 8 Score: '+ highestAttainment['ATT8SCR'].to_string(index=False) + ' where REGION NAME: '+ highestAttainment['REGION NAME'].to_string(index=False)) print('Highest Progress 8 Score: '+ highestProgress['P8MEA'].to_string(index=False) + ' where REGION NAME: '+ highestProgress['REGION NAME'].to_string(index=False)) print('Lowest Avg Attainment 8 Score: '+ lowestAttainment['ATT8SCR'].to_string(index=False) + ' where REGION NAME: '+ lowestAttainment['REGION NAME'].to_string(index=False)) print('Lowest Progress 8 Score: '+ lowestProgress['P8MEA'].to_string(index=False) + ' where REGION NAME: '+ lowestProgress['REGION NAME'].to_string(index=False)) ``` There are many regions which are parts of larger regions. Combine all the “London” regions into one region called “London”, all the “Yorkshire” regions into one called “Yorkshire” and all those regions which are labelled A, B, C or D (e.g. “South West B”) into one region. ``` merge1_df.loc[merge1_df['REGION NAME'].str.startswith('London') == True,'REGION NAME'] = 'London' merge1_df.loc[merge1_df['REGION NAME'].str.contains('Yorkshire') == True,'REGION NAME'] = 'Yorkshire' merge1_df.loc[merge1_df['REGION NAME'].str.startswith('South West') == True,'REGION NAME'] = 'South West' merge1_df.loc[merge1_df['REGION NAME'].str.startswith('South East') == True,'REGION NAME'] = 'South East' merge1_df.loc[merge1_df['REGION NAME'].str.startswith('East of England') == True,'REGION NAME'] = 'East of England' merge1_df.loc[merge1_df['REGION NAME'].str.startswith('North West') == True,'REGION NAME'] = 'North West' merge1_df.loc[merge1_df['REGION NAME'].str.startswith('West Midlands') == True,'REGION NAME'] = 'West Midlands' merge1_df.loc[merge1_df['REGION NAME'].str.startswith('North East') == True,'REGION NAME'] = 'North East' merge1_df.loc[merge1_df['REGION NAME'].str.startswith('East Midlands') == True,'REGION NAME'] = 'East Midlands' fig, ax = plt.subplots(figsize=(18,8)) sns.barplot(x=merge1_df['REGION NAME'],y=merge1_df['ATT8SCR'],data=merge1_df,ci=95) ax.set_ylabel('Average Attainment 8 Score') ax.set_xlabel('Wider Region') ```
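The chain of `.loc` assignments above can also be expressed as a single mapping; the sketch below is an alternative formulation (the helper name `collapse_region` is mine), assuming the same `merge1_df` and the same prefix rules, with a guard for rows where the left merge produced no region name.
```
prefixes = ['London', 'South West', 'South East', 'East of England', 'North West',
            'West Midlands', 'North East', 'East Midlands']

def collapse_region(name):
    # Keep NaN (LA codes with no match in the lookup file) unchanged
    if not isinstance(name, str):
        return name
    if 'Yorkshire' in name:
        return 'Yorkshire'
    for prefix in prefixes:
        if name.startswith(prefix):
            return prefix
    return name

merge1_df['REGION NAME'] = merge1_df['REGION NAME'].map(collapse_region)
```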
``` if (!requireNamespace("BiocManager", quietly = TRUE)) install.packages("BiocManager") BiocManager::install("edgeR") BiocManager::install('DESeq') install.packages('statmod') install.packages("gplots") install.packages("fastICA") ``` http://pklab.med.harvard.edu/scw2014/subpop_tutorial.html ``` sessionInfo() library(DESeq) library(statmod) library(fastICA) directory <-"/home/fvalle/phd/results/tcga/oversigma_10tissue" targets <- read.delim("/home/fvalle/phd/results/gtex/protein-coding/files.dat", sep=',', row.names=1) head(targets) rawdata <- read.delim("/home/fvalle/phd/results/gtex/protein-coding/mainTable.csv", sep=',', row.names=1) head(rawdata) group <- factor(paste0(targets$primary_site,".",targets$disease_type)) lib.size<-estimateSizeFactorsForMatrix(rawdata) ed <- t(t(rawdata)/lib.size) means <- rowMeans(ed) vars <- apply(ed,1,var) cv2 <- vars/means^2 par(mar=c(3.5,3.5,1,1),mgp=c(2,0.65,0),cex=0.9) smoothScatter(log(means),log(cv2)) minMeanForFit <- unname( quantile( means[which(cv2>0.01)], .4 ) ) useForFit <- means >= minMeanForFit # & spikeins fit <- glmgam.fit( cbind( a0 = 1, a1tilde = 1/means[useForFit] ),cv2[useForFit] ) a0 <- unname( fit$coefficients["a0"] ) a1 <- unname( fit$coefficients["a1tilde"]) fit$coefficients # repeat previous plot par(mar=c(3.5,3.5,1,1),mgp=c(2,0.65,0),cex=0.9); smoothScatter(log(means),log(cv2)); xg <- exp(seq( min(log(means[means>0])), max(log(means)), length.out=1000 )) vfit <- a1/xg + a0 # add fit line lines( log(xg), log(vfit), col="black", lwd=3 ) df <- ncol(ed) - 1 # add confidence interval lines(log(xg),log(vfit * qchisq(0.975,df)/df),lty=2,col="black") lines(log(xg),log(vfit * qchisq(0.1,df)/df),lty=2,col="black") afit <- a1/means+a0 varFitRatio <- vars/(afit*means^2) varorder <- order(varFitRatio,decreasing=T) oed <- ed[varorder,] # repeat previous plot par(mar=c(3.5,3.5,1,1),mgp=c(2,0.65,0),cex=0.9) smoothScatter(log(means),log(cv2), main="counts"); lines(log(xg), log(vfit), col="black", lwd=3 ); lines(log(xg),log(vfit * qchisq(0.975,df)/df),lty=2,col="black"); lines(log(xg),log(vfit * qchisq(0.025,df)/df),lty=2,col="black"); # add top 100 genes points(log(means[varorder[1:200]]),log(cv2[varorder[1:200]]),col=2) m = ncol(rawdata) fdr=0.0001 testDenom <- (means*a1 + means^2*cv2)/(1+cv2/m) p <- 1-pchisq(varsGenes * (m-1)/testDenom,m-1) padj <- p.adjust(p,"BH") sig <- padj < fdr sig[is.na(sig)] <- FALSE names(means)[varorder[1:5]] names(means)[sig][1:5] #https://github.com/hemberg-lab/scRNA.seq.funcs/blob/master/R/brennecke.R #https://www.nature.com/articles/nmeth.2645 Brennecke_getVariableGenes <- function(expr_mat, spikes=NA, suppress.plot=FALSE, fdr=0.1, minBiolDisp=0.5) { # require(statmod) rowVars <- function(x) { unlist(apply(x,1,var))} colGenes = "black" colSp = "grey35" fullCountTable <- expr_mat; if (is.character(spikes)) { sp = rownames(fullCountTable) %in% spikes; countsSp <- fullCountTable[sp,]; countsGenes <- fullCountTable[!sp,]; } else if (is.numeric(spikes)) { countsSp <- fullCountTable[spikes,]; countsGenes <- fullCountTable[-spikes,]; } else { countsSp = fullCountTable; countsGenes = fullCountTable; } meansSp = rowMeans(countsSp) varsSp = rowVars(countsSp) cv2Sp = varsSp/meansSp^2 meansGenes = rowMeans(countsGenes) varsGenes = rowVars(countsGenes) cv2Genes = varsGenes/meansGenes^2 # Fit Model minMeanForFit <- unname( quantile( meansSp[ which( cv2Sp > 0.01 ) ], 0.40)) useForFit <- meansSp >= minMeanForFit # if (sum(useForFit) < 50) { # warning("Too few spike-ins exceed minMeanForFit, recomputing using all genes.") # 
meansAll = c(meansGenes, meansSp) # cv2All = c(cv2Genes,cv2Sp) # minMeanForFit <- unname( quantile( meansAll[ which( cv2All > 0.3 ) ], 0.80)) # useForFit <- meansSp >= minMeanForFit # } if (sum(useForFit) < 30) {warning(paste("Only", sum(useForFit), "spike-ins to be used in fitting, may result in poor fit."))} fit <- glmgam.fit( cbind( a0 = 1, a1tilde = 1/meansSp[useForFit] ), cv2Sp[useForFit] ) a0 <- unname( fit$coefficients["a0"] ) a1 <- unname( fit$coefficients["a1tilde"]) # Test psia1theta <- a1 minBiolDisp <- minBiolDisp^2 m = ncol(countsSp); cv2th <- a0 + minBiolDisp + a0 * minBiolDisp testDenom <- (meansGenes*psia1theta + meansGenes^2*cv2th)/(1+cv2th/m) p <- 1-pchisq(varsGenes * (m-1)/testDenom,m-1) padj <- p.adjust(p,"BH") sig <- padj < fdr sig[is.na(sig)] <- FALSE if (!suppress.plot) { plot( meansGenes,cv2Genes, xaxt="n", yaxt="n", log="xy", xlab = "average normalized read count", ylab = "squared coefficient of variation (CV^2)", col="white") axis( 1, 10^(-2:5), c( "0.01", "0.1", "1", "10", "100", "1000", expression(10^4), expression(10^5) ) ) axis( 2, 10^(-2:3), c( "0.01", "0.1", "1", "10", "100","1000" ), las=2 ) abline( h=10^(-2:1), v=10^(-1:5), col="#D0D0D0", lwd=2 ) # Plot the genes, use a different color if they are highly variable points( meansGenes, cv2Genes, pch=20, cex=.2, col = ifelse( padj < .1, "#C0007090", colGenes ) ) points( meansSp, cv2Sp, pch=20, cex=.5, col="blue1") # Add the technical noise fit xg <- 10^seq( -2, 6, length.out=1000 ) lines( xg, (a1)/xg + a0, col="#FF000080", lwd=3 ) # Add a curve showing the expectation for the chosen biological CV^2 thershold lines( xg, psia1theta/xg + a0 + minBiolDisp, lty="dashed", col="#C0007090", lwd=3) } return(names(meansGenes)[sig]) } Brennecke_getVariableGenes(rawdata) ```
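Restating in formulas what the code above does (this is my reading of the code, not a quotation from the Brennecke et al. paper): the gamma GLM fitted by `glmgam.fit` models the squared coefficient of variation as a function of the mean, $CV^2(\mu) \approx a_0 + a_1/\mu$, and a gene is flagged as highly variable when its observed variance exceeds that technical expectation according to a $\chi^2_{m-1}$ test ($m$ being the number of samples), with Benjamini–Hochberg correction via `p.adjust(p, "BH")` and the `fdr` cutoff applied to the adjusted p-values.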
# Outliers ## Initialization ``` # Importing verticapy import verticapy as vp # Creating a connection vp.new_connection({"host": "10.211.55.14", "port": "5433", "database": "testdb", "password": "XxX", "user": "dbadmin"}, name = "VerticaDSN") vp.__version__ from verticapy.utilities import create_schema, drop drop("outliers_exercise", method="schema") create_schema("outliers_exercise") ``` ## Dataset Creation ``` from verticapy.datasets import gen_dataset from datetime import datetime # Generating different transactions # Regular Transactions are between 0 and 100$. They are mostly during the Business # hours but can happen outsides regular_transaction_business_hours = gen_dataset({"hour": {"type": float, "range": [8.0, 20.0]}, "amount": {"type": float, "range": [0.0, 100.0]}, "category": {"type": str, "values": ["Regular"]}}, nrows=300,) regular_transaction = gen_dataset({"hour": {"type": float, "range": [0.0, 24.0]}, "amount": {"type": float, "range": [0.0, 100.0]}, "category": {"type": str, "values": ["Regular"]}}, nrows=100,) # Some high transactions can occur sometimes. However they can only happen during the # Business hours and they are between 3000 and 20000$. high_transaction = gen_dataset({"hour": {"type": float, "range": [8.0, 20.0]}, "amount": {"type": float, "range": [3000.0, 20000.0]}, "category": {"type": str, "values": ["High"]},}, nrows=15,) # Fraudulent Transactions are happening during night and are between 300 and 1000$. fraudulent_transaction = gen_dataset({"hour": {"type": float, "range": [2.0, 5.0]}, "amount": {"type": float, "range": [300.0, 1000.0]}, "category": {"type": str, "values": ["Fraud"]},}, nrows=20) # Our dataset will combine all the generated ones bank_transaction = regular_transaction.append(regular_transaction_business_hours).append( high_transaction).append( fraudulent_transaction).to_db("outliers_exercise.transactions", relation_type="table", inplace=True) ``` ## Data Exploration ``` bank_transaction.scatter(["hour", "amount"], catcol="category",) bank_transaction.search("amount < 2000").scatter(["hour", "amount"], catcol="category",) ``` ## Global Outliers Detection ``` # ZSCORE can catch very high transactions but not fraudulent transactions bank_transaction.outliers(columns = ["hour", "amount"], name = "global_outliers_zscore", threshold = 3, robust = False) bank_transaction.scatter(["hour", "amount"], catcol="global_outliers_zscore",) # Robust ZSCORE can catch anomalies and high transactions bank_transaction.outliers(columns = ["hour", "amount"], name = "global_outliers_robust", threshold = 3, robust = True) bank_transaction.scatter(["hour", "amount"], catcol="global_outliers_robust",) ``` ## Data Preparation & Modeling ``` from verticapy.learn.preprocessing import Normalizer model_robust = Normalizer("outliers_exercise.robust_normalizer", method="robust_zscore") model_robust.fit(bank_transaction, ["hour", "amount"]) ``` ### Pipeline Model ``` # We need to normalize the data in order to use ML algorithms which # are sensible to the p-distance bank_transaction_anomaly = bank_transaction.search("global_outliers_robust = 1") bank_transaction_anomaly.scatter(["hour", "amount"],) from verticapy.learn.pipeline import Pipeline from verticapy.learn.preprocessing import Normalizer from verticapy.learn.cluster import KMeans model = Pipeline([("normalizer_bank", Normalizer("outliers_exercise.fraud_normalizer")), ("kmeans_bank" , KMeans("outliers_exercise.fraud_kmeans", n_cluster=2, init=[(-1.0, -1.0), (0.0, 0.0),]))]) model.fit(bank_transaction_anomaly, 
["hour", "amount"]) model[-1].plot() ``` ### Model Deployment #### in-DB = Near Real-Time ``` # Data are already in Vertica so the model is already deployed bank_transaction = model.predict(bank_transaction, name="cluster") bank_transaction["is_fraud"] = "global_outliers_robust AND cluster=0" bank_transaction.search("is_fraud AND category = 'Fraud'").shape()[0] bank_transaction.search("category = 'Fraud'").shape()[0] print(bank_transaction.current_relation()) ``` #### At the source of the Data Stream = Real-Time ``` # We need to deploy the model outside Vertica # We export the Robust ZSCORE function robust_model = model_robust.to_python(name="robust_model") print(model_robust.to_python(return_str=True, name="robust_model")) # We export the Pipeline Function kmeans_model = model.to_python(name="kmeans_model") print(model.to_python(return_str=True, name="kmeans_model")) # We merge our 2 functions in a global function import numpy as np def anomaly_detection(X): def apply_row(X): x = robust_model([X]) if x[0][0] < 3 and x[0][1] < 3: return False else: x = kmeans_model([X]) return x[0] == 0 return np.apply_along_axis(apply_row, 1, X) anomaly_detection([[3, 200]]) anomaly_detection([[14, 50]]) # We can deploy our model at the source of the data stream import random N = 10000 i, k = 0, 0 while True: if random.random() < 0.05: hour = random.random() * 3 + 2 amount = random.random() * 700 + 300 else: hour = random.random() * 24 amount = random.random() * 100 transaction = [hour, amount] if anomaly_detection([transaction]): print("Anomalous Transaction: {}".format(transaction)) k += 1 i += 1 if i > N: print("Number of anomalies: {} among {} transactions".format(k, N)) break ```
github_jupyter
# Importing verticapy
import verticapy as vp

# Creating a connection
vp.new_connection({"host": "10.211.55.14",
                   "port": "5433",
                   "database": "testdb",
                   "password": "XxX",
                   "user": "dbadmin"},
                  name="VerticaDSN")
vp.__version__

from verticapy.utilities import create_schema, drop
drop("outliers_exercise", method="schema")
create_schema("outliers_exercise")

from verticapy.datasets import gen_dataset
from datetime import datetime

# Generating different transactions
# Regular transactions are between 0 and 100$. They mostly happen during business
# hours but can happen outside them.
regular_transaction_business_hours = gen_dataset({"hour": {"type": float, "range": [8.0, 20.0]},
                                                  "amount": {"type": float, "range": [0.0, 100.0]},
                                                  "category": {"type": str, "values": ["Regular"]}},
                                                 nrows=300,)
regular_transaction = gen_dataset({"hour": {"type": float, "range": [0.0, 24.0]},
                                   "amount": {"type": float, "range": [0.0, 100.0]},
                                   "category": {"type": str, "values": ["Regular"]}},
                                  nrows=100,)

# Some high transactions can occur from time to time. However, they can only happen
# during business hours and they are between 3000 and 20000$.
high_transaction = gen_dataset({"hour": {"type": float, "range": [8.0, 20.0]},
                                "amount": {"type": float, "range": [3000.0, 20000.0]},
                                "category": {"type": str, "values": ["High"]},},
                               nrows=15,)

# Fraudulent transactions happen during the night and are between 300 and 1000$.
fraudulent_transaction = gen_dataset({"hour": {"type": float, "range": [2.0, 5.0]},
                                      "amount": {"type": float, "range": [300.0, 1000.0]},
                                      "category": {"type": str, "values": ["Fraud"]},},
                                     nrows=20)

# Our dataset combines all the generated ones
bank_transaction = regular_transaction.append(regular_transaction_business_hours).append(
                       high_transaction).append(
                       fraudulent_transaction).to_db("outliers_exercise.transactions",
                                                     relation_type="table",
                                                     inplace=True)
bank_transaction.scatter(["hour", "amount"], catcol="category",)
bank_transaction.search("amount < 2000").scatter(["hour", "amount"], catcol="category",)

# ZSCORE can catch very high transactions but not fraudulent transactions
bank_transaction.outliers(columns = ["hour", "amount"],
                          name = "global_outliers_zscore",
                          threshold = 3,
                          robust = False)
bank_transaction.scatter(["hour", "amount"], catcol="global_outliers_zscore",)

# Robust ZSCORE can catch anomalies and high transactions
bank_transaction.outliers(columns = ["hour", "amount"],
                          name = "global_outliers_robust",
                          threshold = 3,
                          robust = True)
bank_transaction.scatter(["hour", "amount"], catcol="global_outliers_robust",)

from verticapy.learn.preprocessing import Normalizer
model_robust = Normalizer("outliers_exercise.robust_normalizer", method="robust_zscore")
model_robust.fit(bank_transaction, ["hour", "amount"])

# We need to normalize the data in order to use ML algorithms which
# are sensitive to the p-distance
bank_transaction_anomaly = bank_transaction.search("global_outliers_robust = 1")
bank_transaction_anomaly.scatter(["hour", "amount"],)

from verticapy.learn.pipeline import Pipeline
from verticapy.learn.preprocessing import Normalizer
from verticapy.learn.cluster import KMeans
model = Pipeline([("normalizer_bank", Normalizer("outliers_exercise.fraud_normalizer")),
                  ("kmeans_bank", KMeans("outliers_exercise.fraud_kmeans",
                                         n_cluster=2,
                                         init=[(-1.0, -1.0), (0.0, 0.0),]))])
model.fit(bank_transaction_anomaly, ["hour", "amount"])
model[-1].plot()

# Data are already in Vertica, so the model is already deployed
bank_transaction = model.predict(bank_transaction, name="cluster")
bank_transaction["is_fraud"] = "global_outliers_robust AND cluster=0"
bank_transaction.search("is_fraud AND category = 'Fraud'").shape()[0]
bank_transaction.search("category = 'Fraud'").shape()[0]
print(bank_transaction.current_relation())

# We need to deploy the model outside Vertica.
# We export the robust ZSCORE function
robust_model = model_robust.to_python(name="robust_model")
print(model_robust.to_python(return_str=True, name="robust_model"))

# We export the pipeline function
kmeans_model = model.to_python(name="kmeans_model")
print(model.to_python(return_str=True, name="kmeans_model"))

# We merge our two functions into a single global function
import numpy as np

def anomaly_detection(X):
    def apply_row(X):
        x = robust_model([X])
        if x[0][0] < 3 and x[0][1] < 3:
            return False
        else:
            x = kmeans_model([X])
            return x[0] == 0
    return np.apply_along_axis(apply_row, 1, X)

anomaly_detection([[3, 200]])
anomaly_detection([[14, 50]])

# We can deploy our model at the source of the data stream
import random

N = 10000
i, k = 0, 0
while True:
    if random.random() < 0.05:
        hour = random.random() * 3 + 2
        amount = random.random() * 700 + 300
    else:
        hour = random.random() * 24
        amount = random.random() * 100
    transaction = [hour, amount]
    if anomaly_detection([transaction]):
        print("Anomalous Transaction: {}".format(transaction))
        k += 1
    i += 1
    if i > N:
        print("Number of anomalies: {} among {} transactions".format(k, N))
        break
``` from google.colab import drive drive.mount('/content/gdrive') import os os.chdir('/content/gdrive/My Drive/finch/tensorflow2/knowledge_graph_completion/wn18/main') %tensorflow_version 2.x !pip install tensorflow-addons from tensorflow_addons.optimizers.cyclical_learning_rate import Triangular2CyclicalLearningRate import tensorflow as tf import pprint import logging import time print("TensorFlow Version", tf.__version__) print('GPU Enabled:', tf.test.is_gpu_available()) def get_vocab(f_path): word2idx = {} with open(f_path) as f: for i, line in enumerate(f): line = line.rstrip() word2idx[line] = i return word2idx """ we use 1vN fast evaluation as purposed in ConvE paper: "https://arxiv.org/abs/1707.01476" sp2o is a dictionary that maps a pair of <subject, predicate> to multiple possible corresponding <objects> in graph """ def make_sp2o(f_paths, e2idx, r2idx): sp2o = {} for f_path in f_paths: with open(f_path) as f: for line in f: line = line.rstrip() s, p, o = line.split() s, p, o = e2idx[s], r2idx[p], e2idx[o] if (s,p) not in sp2o: sp2o[(s,p)] = [o] else: if o not in sp2o[(s,p)]: sp2o[(s,p)].append(o) return sp2o def map_fn(x, y): i, v, s = y[0] one_hot = tf.SparseTensor(i, v, s) return x, (one_hot, y[1], y[2]) # stream data from text files def data_generator(f_path, params, sp2o): with open(f_path) as f: print('Reading', f_path) for line in f: line = line.rstrip() s, p, o = line.split() s, p, o = params['e2idx'][s], params['r2idx'][p], params['e2idx'][o] sparse_i = [[x] for x in sp2o[(s, p)]] sparse_v = [1.] * len(sparse_i) sparse_s = [len(params['e2idx'])] yield ((s, p), ((sparse_i, sparse_v, sparse_s), o, len(sparse_i))) def dataset(is_training, params, sp2o): _shapes = (([], []), (([None, 1], [None], [1]), [], [])) _types = ((tf.int32, tf.int32), ((tf.int64, tf.float32, tf.int64), tf.int32, tf.int32)) if is_training: ds = tf.data.Dataset.from_generator( lambda: data_generator(params['train_path'], params, sp2o), output_shapes = _shapes, output_types = _types,) ds = ds.shuffle(params['num_samples']) ds = ds.map(map_fn) ds = ds.batch(params['batch_size']) else: ds = tf.data.Dataset.from_generator( lambda: data_generator(params['test_path'], params, sp2o), output_shapes = _shapes, output_types = _types,) ds = ds.map(map_fn) ds = ds.batch(params['batch_size']) return ds def update_metrics(scores, query, metrics): to_float = lambda x: tf.cast(x, tf.float32) _, i = tf.math.top_k(scores, sorted=True, k=scores.shape[1]) query = tf.expand_dims(query, 1) is_query = to_float(tf.equal(i, query)) r = tf.argmax(is_query, -1) + 1 mrr = 1. 
/ to_float(r) hits_10 = to_float(tf.less_equal(r, 10)) hits_3 = to_float(tf.less_equal(r, 3)) hits_1 = to_float(tf.less_equal(r, 1)) metrics['mrr'].update_state(mrr) metrics['hits_10'].update_state(hits_10) metrics['hits_3'].update_state(hits_3) metrics['hits_1'].update_state(hits_1) class Complex(tf.keras.Model): def __init__(self, params): super().__init__() self.embed_e_real = tf.keras.layers.Embedding(input_dim=len(params['e2idx']), output_dim=params['embed_dim'], embeddings_initializer=tf.initializers.RandomUniform(), name='Entity_Real') self.embed_e_img = tf.keras.layers.Embedding(input_dim=len(params['e2idx']), output_dim=params['embed_dim'], embeddings_initializer=tf.initializers.RandomUniform(), name='Entity_Img') self.embed_rel_real = tf.keras.layers.Embedding(input_dim=len(params['r2idx']), output_dim=params['embed_dim'], embeddings_initializer=tf.initializers.RandomUniform(), name='Relation_Real') self.embed_rel_img = tf.keras.layers.Embedding(input_dim=len(params['r2idx']), output_dim=params['embed_dim'], embeddings_initializer=tf.initializers.RandomUniform(), name='Relation_Img') self.out_bias = self.add_weight(name='out_bias', shape=[len(params['e2idx'])]) def call(self, inputs): s, p = inputs s_real = self.embed_e_real(s) p_real = self.embed_rel_real(p) s_img = self.embed_e_img(s) p_img = self.embed_rel_img(p) realrealreal = tf.matmul(s_real*p_real, self.embed_e_real.embeddings, transpose_b=True) realimgimg = tf.matmul(s_real*p_img, self.embed_e_img.embeddings, transpose_b=True) imgrealimg = tf.matmul(s_img*p_real, self.embed_e_img.embeddings, transpose_b=True) imgimgreal = tf.matmul(s_img*p_img, self.embed_e_real.embeddings, transpose_b=True) x = realrealreal + realimgimg + imgrealimg - imgimgreal x = tf.nn.bias_add(x, self.out_bias) return x def label_smoothing(inputs, epsilon): V = inputs.get_shape().as_list()[-1] return ((1-epsilon) * inputs) + (epsilon / V) params = { 'train_path': '../data/wn18/train.txt', 'valid_path': '../data/wn18/valid.txt', 'test_path': '../data/wn18/test.txt', 'entity_path': '../vocab/entity.txt', 'relation_path': '../vocab/relation.txt', 'batch_size': 128, 'embed_dim': 200, 'num_samples': 141442, 'init_lr': 1e-4, 'max_lr': 2e-3, 'num_patience': 10, 'epsilon': .1, } params['e2idx'] = get_vocab(params['entity_path']) params['r2idx'] = get_vocab(params['relation_path']) sp2o_tr = make_sp2o([params['train_path']], params['e2idx'], params['r2idx']) sp2o_all = make_sp2o([params['train_path'], params['test_path'], params['valid_path']], params['e2idx'], params['r2idx']) model = Complex(params) model.build(input_shape=[[None], [None]]) pprint.pprint([(v.name, v.shape) for v in model.trainable_variables]) decay_lr = Triangular2CyclicalLearningRate( initial_learning_rate = params['init_lr'], maximal_learning_rate = params['max_lr'], step_size = 8 * params['num_samples'] // params['batch_size'],) optim = tf.optimizers.Adam(params['init_lr']) global_step = 0 best_mrr = 0. 
count = 0 t0 = time.time() logger = logging.getLogger('tensorflow') logger.setLevel(logging.INFO) while True: # TRAINING for ((s, p), (multi_o, o, num_pos)) in dataset(is_training=True, params=params, sp2o=sp2o_tr): with tf.GradientTape() as tape: logits = model((s, p)) multi_o = tf.sparse.to_dense(multi_o, validate_indices=False) num_neg = len(params['e2idx']) - num_pos pos_weight = tf.expand_dims(tf.cast(num_neg/num_pos, tf.float32), 1) labels = label_smoothing(multi_o, params['epsilon']) loss = tf.nn.weighted_cross_entropy_with_logits(labels=labels, logits=logits, pos_weight=pos_weight) loss = tf.reduce_mean(loss) optim.lr.assign(decay_lr(global_step)) grads = tape.gradient(loss, model.trainable_variables) optim.apply_gradients(zip(grads, model.trainable_variables)) if global_step % 50 == 0: logger.info("Step {} | Loss: {:.4f} | Spent: {:.1f} secs | LR: {:.6f}".format( global_step, loss.numpy().item(), time.time()-t0, optim.lr.numpy().item())) t0 = time.time() global_step += 1 # EVALUATION metrics = { 'mrr': tf.metrics.Mean(), 'hits_10': tf.metrics.Mean(), 'hits_3': tf.metrics.Mean(), 'hits_1': tf.metrics.Mean(), } for ((s, p), (multi_o, o, num_pos)) in dataset(is_training=False, params=params, sp2o=sp2o_all): logits = model((s, p)) multi_o = tf.sparse.to_dense(multi_o, validate_indices=False) # create masks for Filtered MRR o_one_hot = tf.one_hot(o, len(params['e2idx'])) unwanted = multi_o - o_one_hot masks = tf.cast(tf.equal(unwanted, 0.), tf.float32) scores = tf.sigmoid(logits) * masks update_metrics(scores=scores, query=o, metrics=metrics) logger.info("MRR: {:.3f}| Hits@10: {:.3f} | Hits@3: {:.3f} | Hits@1: {:.3f}".format( metrics['mrr'].result().numpy(), metrics['hits_10'].result().numpy(), metrics['hits_3'].result().numpy(), metrics['hits_1'].result().numpy())) mrr = metrics['mrr'].result().numpy() if mrr > best_mrr: best_mrr = mrr # you can save model here count = 0 else: count += 1 logger.info("Best MRR: {:.3f}".format(best_mrr)) if count == params['num_patience']: print(params['num_patience'], "times not improve the best result, therefore stop training") break ```
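To make the four `tf.matmul` terms in `Complex.call` above easier to read, here is the ComplEx scoring function they implement, written out in standard notation (the notation is ours, not part of the original notebook):

$$
\phi(s, p, o) = \mathrm{Re}\big(\langle e_s, w_p, \bar{e}_o \rangle\big)
= \langle \mathrm{Re}(e_s), \mathrm{Re}(w_p), \mathrm{Re}(e_o) \rangle
+ \langle \mathrm{Re}(e_s), \mathrm{Im}(w_p), \mathrm{Im}(e_o) \rangle
+ \langle \mathrm{Im}(e_s), \mathrm{Re}(w_p), \mathrm{Im}(e_o) \rangle
- \langle \mathrm{Im}(e_s), \mathrm{Im}(w_p), \mathrm{Re}(e_o) \rangle
$$

where $\langle a, b, c \rangle = \sum_k a_k b_k c_k$. The code computes this for all candidate objects at once by multiplying against the full entity embedding matrices (the `transpose_b=True` matmuls) and adding a learned per-entity bias, which is what makes the 1vN evaluation described in the docstring fast.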
# 7. Visualization

This is the seventh in a series of notebooks related to astronomy data.

As a continuing example, we will replicate part of the analysis in a recent paper, "[Off the beaten path: Gaia reveals GD-1 stars outside of the main stream](https://arxiv.org/abs/1805.00425)" by Adrian M. Price-Whelan and Ana Bonaca.

In the previous notebook we selected photometry data from Pan-STARRS and used it to identify stars we think are likely to be in GD-1.

In this notebook, we'll take the results from previous lessons and use them to make a figure that tells a compelling scientific story.

## Outline

Here are the steps in this notebook:

1. Starting with the figure from the previous notebook, we'll add annotations to present the results more clearly.
2. Then we'll see several ways to customize figures to make them more appealing and effective.
3. Finally, we'll see how to make a figure with multiple panels or subplots.

After completing this lesson, you should be able to

* Design a figure that tells a compelling story.
* Use Matplotlib features to customize the appearance of figures.
* Generate a figure with multiple subplots.

## Making Figures That Tell a Story

So far the figures we've made have been "quick and dirty". Mostly we have used Matplotlib's default style, although we have adjusted a few parameters, like `markersize` and `alpha`, to improve legibility.

Now that the analysis is done, it's time to think more about:

1. Making professional-looking figures that are ready for publication, and
2. Making figures that communicate a scientific result clearly and compellingly.

Not necessarily in that order.

Let's start by reviewing Figure 1 from the original paper. We've seen the individual panels, but now let's look at the whole thing, along with the caption:

<img width="500" src="https://github.com/datacarpentry/astronomy-python/raw/gh-pages/fig/gd1-5.png">

### Exercise

Think about the following questions:

1. What is the primary scientific result of this work?
2. What story is this figure telling?
3. In the design of this figure, can you identify 1-2 choices the authors made that you think are effective? Think about big-picture elements, like the number of panels and how they are arranged, as well as details like the choice of typeface.
4. Can you identify 1-2 elements that could be improved, or that you might have done differently?

```
# Solution goes here
```

## Plotting GD-1

Let's start with the panel in the lower left. You can [download the data from the previous lesson](https://github.com/AllenDowney/AstronomicalData/raw/main/data/gd1_data.hdf) or run the following cell, which downloads it if necessary.

```
from os.path import basename, exists

def download(url):
    filename = basename(url)
    if not exists(filename):
        from urllib.request import urlretrieve
        local, _ = urlretrieve(url, filename)
        print('Downloaded ' + local)

download('https://github.com/AllenDowney/AstronomicalData/raw/main/' +
         'data/gd1_data.hdf')
```

Now we can reload `winner_df`.

```
import pandas as pd

filename = 'gd1_data.hdf'
winner_df = pd.read_hdf(filename, 'winner_df')

import matplotlib.pyplot as plt

def plot_second_selection(df):
    x = df['phi1']
    y = df['phi2']

    plt.plot(x, y, 'ko', markersize=0.7, alpha=0.9)

    plt.xlabel('$\phi_1$ [deg]')
    plt.ylabel('$\phi_2$ [deg]')
    plt.title('Proper motion + photometry selection', fontsize='medium')

    plt.axis('equal')
```

And here's what it looks like.

```
plt.figure(figsize=(10,2.5))
plot_second_selection(winner_df)
```

## Annotations

The figure in the paper uses three other features to present the results more clearly and compellingly:

* A vertical dashed line to distinguish the previously undetected region of GD-1,
* A label that identifies the new region, and
* Several annotations that combine text and arrows to identify features of GD-1.

### Exercise

Choose any or all of these features and add them to the figure:

* To draw vertical lines, see [`plt.vlines`](https://matplotlib.org/3.3.1/api/_as_gen/matplotlib.pyplot.vlines.html) and [`plt.axvline`](https://matplotlib.org/3.3.1/api/_as_gen/matplotlib.pyplot.axvline.html#matplotlib.pyplot.axvline).
* To add text, see [`plt.text`](https://matplotlib.org/3.3.1/api/_as_gen/matplotlib.pyplot.text.html).
* To add an annotation with text and an arrow, see [plt.annotate]().

And here is some [additional information about text and arrows](https://matplotlib.org/3.3.1/tutorials/text/annotations.html#plotting-guide-annotation).

```
# Solution goes here
```

## Customization

Matplotlib provides a default style that determines things like the colors of lines, the placement of labels and ticks on the axes, and many other properties.

There are several ways to override these defaults and customize your figures:

* To customize only the current figure, you can call functions like `tick_params`, which we'll demonstrate below.
* To customize all figures in a notebook, you use `rcParams`.
* To override more than a few defaults at the same time, you can use a style sheet.

As a simple example, notice that Matplotlib puts ticks on the outside of the figures by default, and only on the left and bottom sides of the axes. To change this behavior, you can use `gca()` to get the current axes and `tick_params` to change the settings.

Here's how you can put the ticks on the inside of the figure:

```
plt.gca().tick_params(direction='in')
```

### Exercise

Read the documentation of [`tick_params`](https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.axes.Axes.tick_params.html) and use it to put ticks on the top and right sides of the axes.

```
# Solution goes here
```

## rcParams

If you want to make a customization that applies to all figures in a notebook, you can use `rcParams`.

Here's an example that reads the current font size from `rcParams`:

```
plt.rcParams['font.size']
```

And sets it to a new value:

```
plt.rcParams['font.size'] = 14
```

As an exercise, plot the previous figure again, and see what font sizes have changed. Look up any other element of `rcParams`, change its value, and check the effect on the figure.

If you find yourself making the same customizations in several notebooks, you can put changes to `rcParams` in a `matplotlibrc` file, [which you can read about here](https://matplotlib.org/3.3.1/tutorials/introductory/customizing.html#customizing-with-matplotlibrc-files).

## Style sheets

The `matplotlibrc` file is read when you import Matplotlib, so it is not easy to switch from one set of options to another.

The solution to this problem is style sheets, [which you can read about here](https://matplotlib.org/3.1.1/tutorials/introductory/customizing.html). Matplotlib provides a set of predefined style sheets, or you can make your own.

The following cell displays a list of style sheets installed on your system.

```
plt.style.available
```

Note that `seaborn-paper`, `seaborn-talk` and `seaborn-poster` are particularly intended to prepare versions of a figure with text sizes and other features that work well in papers, talks, and posters.

To use any of these style sheets, run `plt.style.use` like this:

```
plt.style.use('fivethirtyeight')
```

The style sheet you choose will affect the appearance of all figures you plot after calling `use`, unless you override any of the options or call `use` again.

As an exercise, choose one of the styles on the list and select it by calling `use`. Then go back and plot one of the figures above and see what effect it has.

If you can't find a style sheet that's exactly what you want, you can make your own. This repository includes a style sheet called `az-paper-twocol.mplstyle`, with customizations chosen by Azalee Bostroem for publication in astronomy journals.

You can [download the style sheet](https://github.com/AllenDowney/AstronomicalData/raw/main/az-paper-twocol.mplstyle) or run the following cell, which downloads it if necessary.

```
download('https://github.com/AllenDowney/AstronomicalData/raw/main/' +
         'az-paper-twocol.mplstyle')
```

You can use it like this:

```
plt.style.use('./az-paper-twocol.mplstyle')
```

The prefix `./` tells Matplotlib to look for the file in the current directory.

As an alternative, you can install a style sheet for your own use by putting it in your configuration directory. To find out where that is, you can run the following command:

```
import matplotlib as mpl

mpl.get_configdir()
```

## LaTeX fonts

When you include mathematical expressions in titles, labels, and annotations, Matplotlib uses [`mathtext`](https://matplotlib.org/3.1.0/tutorials/text/mathtext.html) to typeset them. `mathtext` uses the same syntax as LaTeX, but it provides only a subset of its features.

If you need features that are not provided by `mathtext`, or you prefer the way LaTeX typesets mathematical expressions, you can customize Matplotlib to use LaTeX.

In `matplotlibrc` or in a style sheet, you can add the following line:

```
text.usetex : true
```

Or in a notebook you can run the following code.

```
plt.rcParams['text.usetex'] = True
```

If you go back and draw the figure again, you should see the difference.

If you get an error message like

```
LaTeX Error: File `type1cm.sty' not found.
```

you might have to install a package that contains the fonts LaTeX needs. On some systems, the packages `texlive-latex-extra` or `cm-super` might be what you need. [See here for more help with this](https://stackoverflow.com/questions/11354149/python-unable-to-render-tex-in-matplotlib).

In case you are curious, `cm` stands for [Computer Modern](https://en.wikipedia.org/wiki/Computer_Modern), the font LaTeX uses to typeset math.

Before we go on, let's put things back where we found them.

```
plt.rcParams['text.usetex'] = False
plt.style.use('default')
```

## Multiple panels

So far we've been working with one figure at a time, but the figure we are replicating contains multiple panels, also known as "subplots".

Confusingly, Matplotlib provides *three* functions for making figures like this: `subplot`, `subplots`, and `subplot2grid`.

* [`subplot`](https://matplotlib.org/3.3.1/api/_as_gen/matplotlib.pyplot.subplot.html) is simple and similar to MATLAB, so if you are familiar with that interface, you might like `subplot`.
* [`subplots`](https://matplotlib.org/3.3.1/api/_as_gen/matplotlib.pyplot.subplots.html) is more object-oriented, which some people prefer.
* [`subplot2grid`](https://matplotlib.org/3.3.1/api/_as_gen/matplotlib.pyplot.subplot2grid.html) is most convenient if you want to control the relative sizes of the subplots.

So we'll use `subplot2grid`.

All of these functions are easier to use if we put the code that generates each panel in a function.

## Upper right

To make the panel in the upper right, we have to reload `centerline_df`.

```
filename = 'gd1_data.hdf'
centerline_df = pd.read_hdf(filename, 'centerline_df')
```

And define the coordinates of the rectangle we selected.

```
pm1_min = -8.9
pm1_max = -6.9
pm2_min = -2.2
pm2_max = 1.0

pm1_rect = [pm1_min, pm1_min, pm1_max, pm1_max]
pm2_rect = [pm2_min, pm2_max, pm2_max, pm2_min]
```

To plot this rectangle, we'll use a feature we have not seen before: `Polygon`, which is provided by Matplotlib.

To create a `Polygon`, we have to put the coordinates in an array with `x` values in the first column and `y` values in the second column.

```
import numpy as np

vertices = np.transpose([pm1_rect, pm2_rect])
vertices
```

The following function takes a `DataFrame` as a parameter, plots the proper motion for each star, and adds a shaded `Polygon` to show the region we selected.

```
from matplotlib.patches import Polygon

def plot_proper_motion(df):
    pm1 = df['pm_phi1']
    pm2 = df['pm_phi2']

    plt.plot(pm1, pm2, 'ko', markersize=0.3, alpha=0.3)

    poly = Polygon(vertices, closed=True, facecolor='C1', alpha=0.4)
    plt.gca().add_patch(poly)

    plt.xlabel('$\mu_{\phi_1} [\mathrm{mas~yr}^{-1}]$')
    plt.ylabel('$\mu_{\phi_2} [\mathrm{mas~yr}^{-1}]$')

    plt.xlim(-12, 8)
    plt.ylim(-10, 10)
```

Notice that `add_patch` is like `invert_yaxis`; in order to call it, we have to use `gca` to get the current axes.

Here's what the new version of the figure looks like. We've changed the labels on the axes to be consistent with the paper.

```
plot_proper_motion(centerline_df)
```

## Upper left

Now let's work on the panel in the upper left. We have to reload `candidates`.

```
filename = 'gd1_data.hdf'
candidate_df = pd.read_hdf(filename, 'candidate_df')
```

Here's a function that takes a `DataFrame` of candidate stars and plots their positions in GD-1 coordinates.

```
def plot_first_selection(df):
    x = df['phi1']
    y = df['phi2']

    plt.plot(x, y, 'ko', markersize=0.3, alpha=0.3)

    plt.xlabel('$\phi_1$ [deg]')
    plt.ylabel('$\phi_2$ [deg]')
    plt.title('Proper motion selection', fontsize='medium')

    plt.axis('equal')
```

And here's what it looks like.

```
plot_first_selection(candidate_df)
```

## Lower right

For the figure in the lower right, we'll use this function to plot the color-magnitude diagram.

```
import matplotlib.pyplot as plt

def plot_cmd(table):
    """Plot a color magnitude diagram.

    table: Table or DataFrame with photometry data
    """
    y = table['g_mean_psf_mag']
    x = table['g_mean_psf_mag'] - table['i_mean_psf_mag']

    plt.plot(x, y, 'ko', markersize=0.3, alpha=0.3)

    plt.xlim([0, 1.5])
    plt.ylim([14, 22])
    plt.gca().invert_yaxis()

    plt.ylabel('$Magnitude (g)$')
    plt.xlabel('$Color (g-i)$')
```

Here's what it looks like.

```
plot_cmd(candidate_df)
```

And here's how we read back the polygon we selected in the color-magnitude diagram, `loop_df`.

```
filename = 'gd1_data.hdf'
loop_df = pd.read_hdf(filename, 'loop_df')
loop_df.head()
```

### Exercise

Add a few lines to `plot_cmd` to show the polygon we selected as a shaded area.

Hint: pass `coords` as an argument to `Polygon` and plot it using `add_patch`.

```
# Solution goes here
```

## Subplots

Now we're ready to put it all together. To make a figure with four subplots, we'll use `subplot2grid`, [which requires two arguments](https://matplotlib.org/3.3.1/api/_as_gen/matplotlib.pyplot.subplot2grid.html):

* `shape`, which is a tuple with the number of rows and columns in the grid, and
* `loc`, which is a tuple identifying the location in the grid we're about to fill.

In this example, `shape` is `(2, 2)` to create two rows and two columns.

For the first panel, `loc` is `(0, 0)`, which indicates row 0 and column 0, which is the upper-left panel.

Here's how we use it to draw the four panels.

```
shape = (2, 2)
plt.subplot2grid(shape, (0, 0))
plot_first_selection(candidate_df)

plt.subplot2grid(shape, (0, 1))
plot_proper_motion(centerline_df)

plt.subplot2grid(shape, (1, 0))
plot_second_selection(winner_df)

plt.subplot2grid(shape, (1, 1))
plot_cmd(candidate_df)
poly = Polygon(loop_df, closed=True, facecolor='C1', alpha=0.4)
plt.gca().add_patch(poly)

plt.tight_layout()
```

We use [`plt.tight_layout`](https://matplotlib.org/3.3.1/tutorials/intermediate/tight_layout_guide.html) at the end, which adjusts the sizes of the panels to make sure the titles and axis labels don't overlap.

As an exercise, see what happens if you leave out `tight_layout`.

## Adjusting proportions

In the previous figure, the panels are all the same size. To get a better view of GD-1, we'd like to stretch the panels on the left and compress the ones on the right.

To do that, we'll use the `colspan` argument to make a panel that spans multiple columns in the grid.

In the following example, `shape` is `(2, 4)`, which means 2 rows and 4 columns. The panels on the left span three columns, so they are three times wider than the panels on the right.

At the same time, we use `figsize` to adjust the aspect ratio of the whole figure.

```
plt.figure(figsize=(9, 4.5))

shape = (2, 4)
plt.subplot2grid(shape, (0, 0), colspan=3)
plot_first_selection(candidate_df)

plt.subplot2grid(shape, (0, 3))
plot_proper_motion(centerline_df)

plt.subplot2grid(shape, (1, 0), colspan=3)
plot_second_selection(winner_df)

plt.subplot2grid(shape, (1, 3))
plot_cmd(candidate_df)
poly = Polygon(loop_df, closed=True, facecolor='C1', alpha=0.4)
plt.gca().add_patch(poly)

plt.tight_layout()
```

This is looking more and more like the figure in the paper.

### Exercise

In this example, the ratio of the widths of the panels is 3:1. How would you adjust it if you wanted the ratio to be 3:2?

```
# Solution goes here
```

## Summary

In this notebook, we reverse-engineered the figure we've been replicating, identifying elements that seem effective and others that could be improved.

We explored features Matplotlib provides for adding annotations to figures -- including text, lines, arrows, and polygons -- and several ways to customize the appearance of figures.

And we learned how to create figures that contain multiple panels.

## Best practices

* The most effective figures focus on telling a single story clearly and compellingly.
* Consider using annotations to guide the reader's attention to the most important elements of a figure.
* The default Matplotlib style generates good quality figures, but there are several ways you can override the defaults.
* If you find yourself making the same customizations on several projects, you might want to create your own style sheet.
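As a footnote to the 3:2 exercise above, here is one possible layout (a sketch, not the official solution), built with the same `subplot2grid` pattern and the notebook's own plotting functions: with 5 columns in the grid, `colspan=3` on the left and `colspan=2` on the right makes the left panels 1.5 times wider than the right ones.

```
plt.figure(figsize=(9, 4.5))

shape = (2, 5)
plt.subplot2grid(shape, (0, 0), colspan=3)
plot_first_selection(candidate_df)

plt.subplot2grid(shape, (0, 3), colspan=2)
plot_proper_motion(centerline_df)

plt.subplot2grid(shape, (1, 0), colspan=3)
plot_second_selection(winner_df)

plt.subplot2grid(shape, (1, 3), colspan=2)
plot_cmd(candidate_df)
poly = Polygon(loop_df, closed=True, facecolor='C1', alpha=0.4)
plt.gca().add_patch(poly)

plt.tight_layout()
```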
<a id="top"></a> <center><h1>Source code for scraping a single NBA player stats from ESPN's web site</h1></center> My NBA web scraper only scrapes current NBA players stats. But, you can get the classic players like Michael Jordan by obtaining their stats' url from espn.go.com and entering it in these scripts below. ### Quick Links - [source code for regular season averages](#season_avgs) - [source code for regular season totals](#season_totals) - [source code for reguarl season misc totals](#season_misc) - [sqlite table definitions](#sqlite_tables) <a id="season_avgs"></a> ## Populating the regular season averages table [[back to top]](#top) ``` import urllib.request as request from bs4 import BeautifulSoup import sqlite3 import re from datetime import datetime # http://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks-in-python def chunks(l, n): """ Yield successive n-sized chunks from l. """ for i in range(0, len(l), n): yield l[i:i+n] #player = 'http://espn.go.com/nba/player/stats/_/id/1035/michael-jordan' player = input("Paste NBA player's URL here: ") player_id = player.split('/')[8] html_player = request.urlopen(player) soup_player = BeautifulSoup(html_player,'lxml') soup_name = soup_player.find('meta', property='og:title') player_name = soup_name['content'] regular_season_stats = soup_player.find_all('tr', class_=re.compile('row')) size = int(len(regular_season_stats)/3) season_avgs_slice = slice(0,size) #season_totals_slice = slice(size,size*2) #season_misc_totals_slice = slice(size*2,size*3) regular_season_avgs = regular_season_stats[season_avgs_slice] #regular_season_totals = regular_season_stats[season_totals_slice] #regular_season_misc_totals = regular_season_stats[season_misc_totals_slice] avgs = [] for row in regular_season_avgs: if len(row) == 20: # Only accept row that has complete data for data in row: avgs.append(data.get_text()) else: pass index = 0 # insert the player ID before the player's season increment = 0 for row in range(len(regular_season_avgs)): avgs.insert(index + increment, player_id) index = index + 20 # There are 20 columns in the season avgs section increment = increment + 1 index = 1 # insert the player's name after the player's ID increment = 0 for row in range(len(regular_season_avgs)): avgs.insert(index + increment, player_name) index = index + 21 # There are 21 columns in the season avgs section since I've just added player ID increment = increment + 1 conn = sqlite3.connect('/home/pybokeh/databases/nba.db') c = conn.cursor() for data in chunks(avgs,22): try: c.execute('INSERT INTO regular_season_avgs VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)', data) except: pass conn.commit() conn.close() ``` <a id="season_totals"></a> ## Populating the regular season totals table [[back to top]](#top) ``` import urllib.request as request from bs4 import BeautifulSoup import sqlite3 import re from datetime import datetime # http://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks-in-python def chunks(l, n): """ Yield successive n-sized chunks from l. 
""" for i in range(0, len(l), n): yield l[i:i+n] #player = 'http://espn.go.com/nba/player/stats/_/id/1035/michael-jordan' player = input("Paste NBA player's URL here: ") player_id = player.split('/')[8] html_player = request.urlopen(player) soup_player = BeautifulSoup(html_player,'lxml') soup_name = soup_player.find('meta', property='og:title') player_name = soup_name['content'] regular_season_stats = soup_player.find_all('tr', class_=re.compile('row')) size = int(len(regular_season_stats)/3) #season_avgs_slice = slice(0,size) season_totals_slice = slice(size,size*2) #season_misc_totals_slice = slice(size*2,size*3) #regular_season_avgs = regular_season_stats[season_avgs_slice] regular_season_totals = regular_season_stats[season_totals_slice] #regular_season_misc_totals = regular_season_stats[season_misc_totals_slice] totals = [] for row in regular_season_totals: if len(row) == 17: # Only accept row that has complete data for data in row: totals.append(data.get_text()) else: pass index = 0 # insert the player ID before the player's season increment = 0 for row in range(len(regular_season_totals)): totals.insert(index + increment, player_id) index = index + 17 # There are 17 columns in the season totals section increment = increment + 1 index = 1 # insert the player's name after the player's ID increment = 0 for row in range(len(regular_season_totals)): totals.insert(index + increment, player_name) index = index + 18 # There are now 18 columns in the reg season totals after inserting player's ID increment = increment + 1 conn = sqlite3.connect('/home/pybokeh/databases/nba.db') c = conn.cursor() for data in chunks(totals,19): try: c.execute('INSERT INTO regular_season_totals VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)', data) except: pass conn.commit() conn.close() ``` <a id="season_misc"></a> ## Populating regular season misc totals table [[back to top]](#top) ``` import urllib.request as request from bs4 import BeautifulSoup import sqlite3 import re from datetime import datetime # http://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks-in-python def chunks(l, n): """ Yield successive n-sized chunks from l. 
""" for i in range(0, len(l), n): yield l[i:i+n] #player = 'http://espn.go.com/nba/player/stats/_/id/1035/michael-jordan' player = input("Paste NBA player's URL here: ") player_id = player.split('/')[8] html_player = request.urlopen(player) soup_player = BeautifulSoup(html_player,'lxml') soup_name = soup_player.find('meta', property='og:title') player_name = soup_name['content'] regular_season_stats = soup_player.find_all('tr', class_=re.compile('row')) size = int(len(regular_season_stats)/3) #season_avgs_slice = slice(0,size) #season_totals_slice = slice(size,size*2) season_misc_totals_slice = slice(size*2,size*3) #regular_season_avgs = regular_season_stats[season_avgs_slice] #regular_season_totals = regular_season_stats[season_totals_slice] regular_season_misc_totals = regular_season_stats[season_misc_totals_slice] misc_totals = [] for row in regular_season_misc_totals: if len(row) == 13: # Only accept row that has complete data for data in row: misc_totals.append(data.get_text()) else: pass index = 0 # insert the player ID before the player's season increment = 0 for row in range(len(regular_season_misc_totals)): misc_totals.insert(index + increment, player_id) index = index + 13 # There are 13 columns in the season misc totals section increment = increment + 1 index = 1 # insert the player's name after the player's ID increment = 0 for row in range(len(regular_season_misc_totals)): misc_totals.insert(index + increment, player_name) index = index + 14 # There are now 14 columns in the reg season misc totals after inserting player ID increment = increment + 1 conn = sqlite3.connect('/home/pybokeh/databases/nba.db') c = conn.cursor() for data in chunks(misc_totals,15): try: c.execute('INSERT INTO regular_season_misc_totals VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)', data) except: pass conn.commit() conn.close() ``` <a id="sqlite_tables"></a> ## sqlite table definitions [[back to top]](#top) ``` CREATE TABLE "player_game_stats" ( "id" INTEGER PRIMARY KEY NOT NULL, "name_pos" TEXT NOT NULL, "team_name" TEXT NOT NULL, "GP" INTEGER NOT NULL, "GS" INTEGER NOT NULL, "MIN" REAL NOT NULL, "PPG" REAL NOT NULL, "OFFR" REAL NOT NULL, "DEFR" REAL NOT NULL, "RPG" REAL NOT NULL, "APG" REAL NOT NULL, "SPG" REAL NOT NULL, "BPG" REAL NOT NULL, "TPG" REAL NOT NULL, "FPG" REAL NOT NULL, "A2TO" REAL NOT NULL, "PER" REAL NOT NULL ); CREATE TABLE "player_shooting_stats" ( "id" INTEGER PRIMARY KEY NOT NULL, "name_pos" TEXT NOT NULL, "team_name" TEXT NOT NULL, "FGM" REAL NOT NULL, "FGA" REAL NOT NULL, "FG_Perc" REAL NOT NULL, "3PM" REAL NOT NULL, "3PA" REAL NOT NULL, "3P_Perc" REAL NOT NULL, "FTM" REAL NOT NULL, "FTA" REAL NOT NULL, "FT_Perc" REAL NOT NULL, "2PM" REAL NOT NULL, "2PA" REAL NOT NULL, "2P_Perc" REAL NOT NULL, "PPS" REAL NOT NULL, "AFG_Perc" REAL NOT NULL ); CREATE TABLE "regular_season_avgs" ( "id" INTEGER NOT NULL, "season" TEXT NOT NULL, "team" TEXT NOT NULL, "GP" INTEGER NOT NULL, "GS" INTEGER NOT NULL, "MIN" REAL NOT NULL, "FGM-A" TEXT NOT NULL, "FG_Perc" REAL NOT NULL, "3PM-A" TEXT NOT NULL, "3P_Perc" REAL NOT NULL, "FTM-A" TEXT NOT NULL, "FT_Perc" REAL NOT NULL, "OR" REAL NOT NULL, "DR" REAL NOT NULL, "REB" REAL NOT NULL, "AST" REAL NOT NULL, "BLK" REAL NOT NULL, "STL" REAL NOT NULL, "PF" REAL NOT NULL, "TO" REAL NOT NULL, "PTS" REAL NOT NULL, unique ("id", "season","team") ); CREATE TABLE "regular_season_totals" ( "id" INTEGER NOT NULL, "season" TEXT NOT NULL, "team" TEXT NOT NULL, "FGM-A" TEXT NOT NULL, "FG_Perc" REAL NOT NULL, "3PM-A" TEXT NOT NULL, "3P_Perc" REAL NOT NULL, "FTM-A" TEXT NOT 
NULL, "FT_Perc" REAL NOT NULL, "OR" INTEGER NOT NULL, "DR" INTEGER NOT NULL, "REB" INTEGER NOT NULL, "AST" INTEGER NOT NULL, "BLK" INTEGER NOT NULL, "STL" INTEGER NOT NULL, "PF" INTEGER NOT NULL, "TO" INTEGER NOT NULL, "PTS" INTEGER NOT NULL, unique ("id","season","team") ); CREATE TABLE "regular_season_misc_totals" ( "id" INTEGER NOT NULL, "season" TEXT NOT NULL, "team" TEXT NOT NULL, "DBLDBL" INTEGER NOT NULL, "TRIDBL" INTEGER NOT NULL, "DQ" INTEGER NOT NULL, "EJECT" INTEGER NOT NULL, "TECH" INTEGER NOT NULL, "FLAG" INTEGER NOT NULL, "AST2TO" REAL NOT NULL, "STL2TO" REAL NOT NULL, "RAT" REAL NOT NULL, "SCEFF" REAL NOT NULL, "SHEFF" REAL NOT NULL, unique("id","season","team") ); ``` [[back to top]](#top)
# GBML-Pandas demo.

Creates a descriptor DataFrame using data fetched from the Materials Project database and uses gbml to make predictions. This is a simplified translation of gbml/elasticity.py.

Requires: pandas

Author: Kiran Mathew

```
import os
import json
from collections import defaultdict

import numpy as np
import pandas as pd

from pymatgen.core.periodic_table import Element
from pymatgen.core.composition import Composition
from pymatgen.ext.matproj import MPRester

import gbml
```

Read the data file containing the "atom in a box" energy for the elements

```
DATAFILE_AIAB = os.path.join(os.path.dirname(gbml.__file__), "data", "element_aiab_energy.json")

aiab_energy_dict = {}
try:
    with open(DATAFILE_AIAB, 'r') as json_file:
        aiab_energy_dict = json.load(json_file)
except:
    print("failed to open the data file")


def holder_mean(x, w, p):
    """ generalized mean """
    return np.power(np.dot(w, np.power(x, p)), 1./p)


def get_aiab_energy(element):
    """ return atom in a box energy for the given element """
    d = aiab_energy_dict.get(str(element), None)
    if d:
        return d[0]
    return None


def append_data(df, d, axis=1):
    """
    concatenate the given data to the DataFrame along the specified axis

    Args:
        df (DataFrame): input DataFrame
        d (dict): data to be appended as dict. The keys become the column names
        axis (int): concat direction
    """
    df2 = pd.DataFrame(d, index=df.index)
    return pd.concat([df, df2], axis=axis)
```

Get the raw data from the Materials Project database and convert it to a pandas DataFrame.

```
mpids = ["mp-10003", "mp-10010", "mp-10015", "mp-10021", "mp-26", "mp-10018", "mp-19306"]
properties = ["pretty_formula", "nsites", "volume", "energy_per_atom"]

data = defaultdict(list)
MAPI_KEY = os.environ.get("MAPI_KEY", "")
with MPRester(MAPI_KEY) as mpr:
    for entry in mpr.query(criteria={"task_id": {"$in": mpids}}, properties=properties):
        for p in properties:
            data[p].append(entry[p])

df = pd.DataFrame(data, index=mpids)
df.head(20)
```

Add processed data (average electronegativity, average rows, etc.) to the DataFrame

```
d = defaultdict(list)
for formula in df.pretty_formula:
    comp = Composition(formula)
    rows = []
    el_negs = []
    weights = []
    energies = []
    for el in comp:
        rows.append(el.row)
        el_negs.append(el.X)
        weights.append(comp.get_atomic_fraction(el))
        energies.append(get_aiab_energy(el))
    d["rows_avg"].append(holder_mean(rows, weights, 1.0))
    d["X_avg"].append(holder_mean(el_negs, weights, -4.0))  # weighted mean of the electronegativities
    d["reference_energy"].append(np.average(energies, weights=weights))

df = append_data(df, d)
df.head(20)
```

Pre-process volume and energy data

```
df["log_volume_per_atom"] = np.log10(df.volume/df.nsites)
df["energy_per_atom"] = df.energy_per_atom - df.reference_energy
df.head(20)
```

Drop columns that are not needed

```
df.drop(["pretty_formula", "volume", "nsites", "reference_energy"], axis=1, inplace=True)
df.head(20)

cols = df.columns.tolist()
print(cols)
```

Rearrange the columns

```
cols = [cols[3], cols[2], cols[0], cols[1]]
df = df[cols]
df.head(20)
```

Use the Python interface to the gbml library to make predictions

```
import gbml.core

num_predictions = len(mpids)
k_descriptors = df.values
k_predictions = np.empty(num_predictions)

# Make predictions
k_filename = os.path.join(os.path.dirname(gbml.__file__), "data", "gbml-K-v1.00.data")
gbml.core.predict(k_filename, num_predictions, k_descriptors, k_predictions)

k_list = np.power(10.0, k_predictions).tolist()
print(k_list)
```
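As a quick sanity check on `holder_mean` (not part of the original demo), the generalized mean reduces to the familiar weighted arithmetic mean at p = 1, while a strongly negative exponent such as -4 pulls the result toward the smallest values of x; the toy numbers below are made up for the example.

```
import numpy as np

def holder_mean(x, w, p):
    """Generalized (power) mean with weights w and exponent p."""
    return np.power(np.dot(w, np.power(x, p)), 1. / p)

x = [1.0, 2.0, 4.0]
w = [0.5, 0.25, 0.25]           # atomic fractions, summing to 1

print(holder_mean(x, w, 1.0))   # 2.0  == weighted arithmetic mean
print(holder_mean(x, w, -4.0))  # ~1.18, pulled toward the smallest value
```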